Dataset schema (one row per column; ⌀ marks columns that contain nulls):

| column | dtype | lengths / values |
|---|---|---|
| repo_name | stringlengths | 7–111 |
| __id__ | int64 | 16.6k–19,705B |
| blob_id | stringlengths | 40–40 |
| directory_id | stringlengths | 40–40 |
| path | stringlengths | 5–151 |
| content_id | stringlengths | 40–40 |
| detected_licenses | list | |
| license_type | stringclasses | 2 values |
| repo_url | stringlengths | 26–130 |
| snapshot_id | stringlengths | 40–40 |
| revision_id | stringlengths | 40–40 |
| branch_name | stringlengths | 4–42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k–687M ⌀ |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | stringclasses | 12 values |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0–10.2M ⌀ |
| gha_stargazers_count | int32 | 0–178k ⌀ |
| gha_forks_count | int32 | 0–88.9k ⌀ |
| gha_open_issues_count | int32 | 0–2.72k ⌀ |
| gha_language | stringlengths | 1–16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | stringlengths | 10–2.95M |
| src_encoding | stringclasses | 5 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10–2.95M |
| extension | stringclasses | 19 values |
| num_repo_files | int64 | 1–202k |
| filename | stringlengths | 4–112 |
| num_lang_files | int64 | 1–202k |
| alphanum_fraction | float64 | 0.26–0.89 |
| alpha_fraction | float64 | 0.2–0.89 |
| hex_fraction | float64 | 0–0.09 |
| num_lines | int32 | 1–93.6k |
| avg_line_length | float64 | 4.57–103 |
| max_line_length | int64 | 7–931 |
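A minimal sketch for loading and inspecting a dump with this schema (the shard file name, the parquet format, and the use of pandas are assumptions, not something this table specifies):

```python
# Inspection sketch -- "data.parquet" is a hypothetical shard name.
import pandas as pd

df = pd.read_parquet("data.parquet")
print(df.dtypes)                                      # column names and dtypes, as in the schema above
print(df[["repo_name", "path", "num_lines"]].head())  # peek at a few metadata columns
```

The raw rows below are pipe-delimited, one repository file per row.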
Grimoors/PythonBasics | 6,133,213,312,870 | 604744aa227519b9c051e8299da8462fc9f3b68f | 057b8397e243697b333796bf21eae53bb9eeaec1 | /Python_Tute1/F5-IterationsInLists.py | 1b779c894791d385d1f5dd268893291250833fb1 | []
| no_license | https://github.com/Grimoors/PythonBasics | cfd92759452e3fb831c33acbdff5c59aa83ba441 | 6e99a8dd34073e49bc05cd32dd0a9bcacf4bcc7e | refs/heads/main | 2023-06-24T21:30:23.112947 | 2021-07-18T13:58:42 | 2021-07-18T13:58:42 | 368,498,721 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import matplotlib
import string
import dataclasses
import importlib
import random
import copy
if __name__ == "__main__":
print("Running as Standalone, Taking Input")
print("here any initialization / input as a standalone program is to be inputted")
a=[]
a.append(1)
a.append("hello")
a.append([1,"hello"])
for i in a:
print (i)
for i in range(3):
print(a[i])
for i, value in enumerate(a):
print(str(i) + str(value))
    # range: for i in range(10)
    # == for (int i = 0; i < 10; i++)
    # Note: range() only accepts integer arguments, so range(1, 10, 0.5) raises a
    # TypeError; a float step needs a while loop (see the example at the end of
    # this block) or numpy.arange.
    # Intended C equivalent: for (double i = 1; i < 10; i += 0.5)
    # while cond:
    #     statements 1
    # else:             # runs only if the loop finishes without a break
    #     statements 2
# if cond:
# statements 1
# elif cond2:
# statements 2
# else:
# statements 3
    # if cond1 and cond2:
    #     if cond3 | cond4:    # note: | is bitwise OR; use `or` for boolean tests
    #         if cond6 == cond5:
    #             print("this is getting a bit much")
a=[1,2,3]
print (1 in a)
b = "My name is Vivek"
print("name" in b)
    print(b.find("name"))  # find() returns the start index (3 here), or -1 if absent
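    # Illustrative addition (not part of the original lesson): the float-step
    # loop promised above, since range() cannot step by 0.5.
    halves = []
    i = 1.0
    while i < 10:
        halves.append(i)
        i += 0.5
    print(halves[:4])  # [1.0, 1.5, 2.0, 2.5]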
else:
print(f"Running as a module of another script, whose '__name__' = {__name__} , Not Taking input, Tyring to take parameterized input")
| UTF-8 | Python | false | false | 1,237 | py | 19 | F5-IterationsInLists.py | 19 | 0.531124 | 0.503638 | 0 | 64 | 18.3125 | 137 |
jied314/IQs | 7,361,573,946,553 | b5a441fcbf2273d53575a13c1836b2ff461323aa | 9752ab9bfcb2f5d058086bbc6ad8c5c435b24b91 | /tags/linked_list/reverse_nodes_in_k_groups.py | 00575a718a51de96e9270ee6604bc6f3936a4692 | []
| no_license | https://github.com/jied314/IQs | e9b99068f2585f5eca2144395588302108c267bb | e41f4ac9e99b9272ed4718680f4d12fd7443db03 | refs/heads/master | 2021-01-10T09:07:46.805287 | 2016-03-14T05:56:27 | 2016-03-14T05:56:27 | 52,819,461 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
import lib
class Solution(object):
def reverseKGroup(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if head is None or k == 1:
return head
dummy = ListNode(-1)
dummy.next = head
pre = dummy
node = dummy
while node.next is not None:
            i = 0
            while node is not None and i < k:  # advance node to the k-th node of this group
                node = node.next
                i += 1
            if node is None:  # fewer than k nodes remain; leave the tail as-is
                break
            else:  # i == k, so [pre.next .. node] is a full group
                next = node.next
                rh, rt = self.reverse(pre.next, next)  # reverse the half-open range [pre.next, next)
                pre.next = rh
                rt.next = next  # re-attach the remainder of the list
                pre = rt
                node = pre
return dummy.next
    def reverse(self, head, tail):
        # Reverse the sublist [head, tail) iteratively; return [new_head, new_tail].
        new_head, new_tail = None, head
node = head
while node != tail:
next = node.next
node.next = new_head
new_head = node
node = next
return [new_head, new_tail]
# 1/26 - borrowed from Yanxing
# Test on LC - 80ms, 32%
def reverse_k_groups_nice(self, head, k):
dummy = ListNode(-1)
dummy.next = head
pre = dummy
count = 0
while head is not None:
count += 1
if count % k == 0:
last = pre.next
next = head.next
pre.next = self.reverse_between(pre.next, head.next)
pre = last
head = next
last.next = head
else:
head = head.next
return dummy.next
# recursively reverse
def reverse_between(self, head, end):
if head is None or head.next == end:
return head
next = head.next
new_head = self.reverse_between(next, end)
next.next = head
head.next = None
return new_head
test = Solution()
ll = lib.build_ll([1,2,3,4,5])
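# Additional check (added for illustration, using the same helper module as
# above): the recursive variant with k=2 should reverse pairs: 2 1 4 3 5.
ll2 = lib.build_ll([1, 2, 3, 4, 5])
print lib.traverse_ll(test.reverse_k_groups_nice(ll2, 2))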
print lib.traverse_ll(test.reverseKGroup(ll, 3)) | UTF-8 | Python | false | false | 2,176 | py | 280 | reverse_nodes_in_k_groups.py | 278 | 0.472886 | 0.463235 | 0 | 82 | 25.54878 | 68 |
anchorhong/leetcode_python | 8,486,855,424,896 | 3313c2b0f42a78d98822dc49c2914ae2f238fb31 | a5113f37e3d5c73b7abb61202c008fe62a3a38e3 | /subjects/0973_kClosest/Solution.py | 14abaf3be76019d412351f31003f9398973844b4 | []
| no_license | https://github.com/anchorhong/leetcode_python | c67e095a389734560984e2b658412ec4981e59db | 40cdc510e048164aee82a5a64a3d8e187cb75920 | refs/heads/master | 2023-01-21T09:10:55.473407 | 2020-12-01T15:25:21 | 2020-12-01T15:25:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
import heapq
from collections import defaultdict
class Solution:
def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
if K == 0:
return []
point_dict = defaultdict(list)
max_heap = list()
        for point in points:
            # negate the squared distance so heapq's min-heap acts as a max-heap
            distance = 0 - (point[0] ** 2 + point[1] ** 2)
if len(max_heap) < K:
heapq.heappush(max_heap, distance)
point_dict[distance].append(point)
            elif distance > max_heap[0]:
                # this point is closer than the farthest of the K kept so far:
                # evict the farthest, then admit it (elif also prevents a point
                # pushed above from being processed a second time)
max_distance = heapq.heappop(max_heap)
point_dict[max_distance].pop()
if not point_dict[max_distance]:
del point_dict[max_distance]
heapq.heappush(max_heap, distance)
point_dict[distance].append(point)
res = list()
for v in point_dict.values():
res.extend(v)
return res
if __name__ == "__main__":
# points = [[-95, 76], [17, 7], [-55, -58], [53, 20], [-69, -8], [-57, 87], [-2, -42], [-10, -87],
points = [[1,3],[-2,2]]
K = 1
print(Solution().kClosest(points, K))
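# Alternative sketch (not in the original solution): the standard library can
# do the same selection directly:
#   print(heapq.nsmallest(K, points, key=lambda p: p[0] ** 2 + p[1] ** 2))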
| UTF-8 | Python | false | false | 1,179 | py | 94 | Solution.py | 94 | 0.506361 | 0.471586 | 0 | 36 | 31.75 | 102 |
python-kol/libkol | 16,320,875,727,228 | 913ffcf36849c3f0190ad3371c43e2602cb7f65a | d47004d217a3834140d9d11727b26b6381dbd890 | /libkol/Stat.py | a3812484b14be8e0f4b658647b0275f65afc78f0 | [
"BSD-3-Clause"
]
| permissive | https://github.com/python-kol/libkol | 9f3e1accfa25e775a53aec776e96c2eee6c39c86 | bdc9aa8dbae64ead07e7dbc36f9d6ba802f65ddc | refs/heads/master | 2023-01-21T12:38:51.704144 | 2022-01-24T09:27:42 | 2022-01-24T09:27:42 | 172,836,521 | 8 | 10 | NOASSERTION | false | 2022-12-27T15:36:39 | 2019-02-27T03:31:32 | 2022-04-26T19:54:02 | 2022-12-27T15:36:38 | 18,371 | 3 | 7 | 5 | HTML | false | false | from enum import Enum
from typing import List
from .CharacterClass import CharacterClass
class Stat(Enum):
Muscle = "muscle"
Mysticality = "mysticality"
Moxie = "moxie"
def __contains__(self, character_class: CharacterClass) -> bool:
return character_class.stat == self
@property
def substats(self) -> List[str]:
if self is self.Muscle:
return [
"Beefiness",
"Fortitude",
"Muscleboundness",
"Strengthliness",
"Strongness",
]
if self is self.Mysticality:
return ["Enchantedness", "Magicalness", "Mysteriousness", "Wizardliness"]
if self is self.Moxie:
return ["Cheek", "Chutzpah", "Roguishness", "Sarcasm", "Smarm"]
return []
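# Usage sketch (illustrative; assumes some CharacterClass member whose .stat is
# Stat.Muscle -- member names are not shown in this file):
#   muscle_class in Stat.Muscle   # True via __contains__ above
#   Stat.Mysticality.substats     # ["Enchantedness", "Magicalness", ...]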
| UTF-8 | Python | false | false | 823 | py | 261 | Stat.py | 211 | 0.562576 | 0.562576 | 0 | 30 | 26.433333 | 85 |
JominicDones/MTMW12Assn3 | 5,394,478,956,779 | 574901a91bffe4630a7d63cf4b858e8c07cf96b3 | b2c65ffa6eaf0bedf276967e4c73b63d82ed4d48 | /IntroToProgramming/Assignment3/ass3.py | 82652c0c0242ce007b143b79b10971247d1baa20 | []
| no_license | https://github.com/JominicDones/MTMW12Assn3 | a76dfbccdb07996a1440a97a7ab9512bf77f370f | aa6c0f55c3474e37341c75bebd42649adc609856 | refs/heads/master | 2021-05-08T09:14:26.125195 | 2017-10-16T09:26:52 | 2017-10-16T09:26:52 | 107,103,212 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
File that runs the code for MTMW12 Assignment 3. I wasn't able to get as much
done as I'd hoped: I'm still getting to grips with Python and Git, and I also
missed the class and wasn't able to catch up.
Hopefully the code is styled correctly and commented adequately at least.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def pressureCalc(_P_a, _P_b, _y, _L):
"""
Calculates the pressure as a function of y
    Input: _P_a : pressure at the ground
    Input: _P_b : pressure difference at the maximum height
Input: _y : height
Input: _L : characteristic length scale
Variable: pressure : the pressure as a function of height(y)
"""
pressure = np.zeros(len(_y))
for x in xrange(0, len(_y)):
pressure[x] = _P_a + _P_b * np.cos(_y[x] * np.pi / _L)
#Plot the pressure as a function of height (to check)
plt.figure(1, figsize=(10,4))
plt.plot(_y,pressure)
plt.ylabel("Pressure (Pa)")
plt.xlabel("Height (m)")
plt.title("Variation of Pressure with Height")
plt.show()
return pressure
def pressureGradient(_y, _P, N):
"""
Calculates the gradient of the pressure, dP/dy using centred, 2nd order
finite difference formula. The boundary points are calculated using
forward and backward differences.
Input: _y : height
Input: _P : pressure
Variable: delta_y : step/distance between points
Variable: P_prime : grad of pressure
"""
#Calculate the non-boundary values of pressure gradient
P_prime = np.zeros(len(_P))
delta_y = (_y[len(_y)-1] - _y[0])/N
for x in xrange(1, len(_y)-1):
P_prime[x] = (_P[x+1] - _P[x-1])/(2 * delta_y)
#Calculate the boundary values
P_prime[0] = (_P[1] - _P[0])/delta_y
P_prime[len(P_prime)-1] = (_P[len(P_prime)-1] - _P[len(P_prime)-2])/delta_y
'''
#Plot the pressure gradient as a function of height (to check)
plt.figure(2, figsize=(10,4))
plt.plot(_y,P_prime)
plt.ylabel("Pressure gradient (Pa/m)")
plt.xlabel("Height (m)")
plt.title("Variation of Pressure gradient with Height")
plt.show()
'''
return P_prime
def errPlot(_y, _P_b, _L, _P_prime, _u, _rho, _f):
"""
Plots the analytical and numerical solutions of P', and plots the errors.
Input: _y : height
Input: _P_b : pressure difference at the top
Input: _L : length scale
Input: _P_prime : numerical values of P'
Input: _u : numerically calculated geostrophic wind
Param: anal_Pprime : the analytical value of P'
Variable: errors : the error between the analytical and numerical
values of P'
Variable: Relerrors : the relative error between the analytical and numerical
values of P'
Variable: windErr : the error between the analytical and numerical
values of u
Variable: relWindErr : the relative error between the analytical and numerical
values of u
"""
#Calculate the analytical solution for P'
anal_Pprime = np.zeros(len(_P_prime))
for x in xrange(0, len(anal_Pprime)):
anal_Pprime[x] = - _P_b * (np.pi / _L) * np.sin(_y[x] * np.pi / _L)
#Plot the pressure gradient as a function of height,\
#analytical and numerical
plt.figure(3, figsize=(10,4))
plt.plot(_y,_P_prime)
plt.plot(_y,anal_Pprime)
plt.ylabel("Pressure gradient (Pa/m)")
plt.xlabel("Height (m)")
plt.title("Analytical vs. Numerical pressure gradient solutions")
plt.show()
'''
#Calculate the relative error
errors = np.zeros(len(_P_prime))
Relerrors = np.zeros(len(_P_prime))
for x in xrange(0, len(errors)):
errors[x] = abs(anal_Pprime[x] - _P_prime[x])
Relerrors[x] = 100 * errors[x] / anal_Pprime[x]
plt.figure(4, figsize=(10,4))
plt.plot(_y,errors)
plt.ylabel("relative error %")
plt.xlabel("Height (m)")
plt.title("Error of numerical solution")
plt.show()
plt.figure(5, figsize=(10,4))
plt.plot(_y,Relerrors)
plt.ylabel("relative error")
plt.xlabel("Height (m)")
plt.title("Relative error of numerical solution")
plt.show()
'''
#Calculate the analytic value for the wind
anal_u = np.zeros(len(_u))
coeff = -1 * 1/_rho * 1/_f
for x in xrange(0, len(_y)):
anal_u[x] = coeff * anal_Pprime[x]
#plot the numerical and analytical solutions for wind
plt.figure(7, figsize=(10,4))
plt.plot(_y,_u)
plt.plot(_y,anal_u)
plt.ylabel("Wind Speed")
plt.xlabel("Height (m)")
plt.title("Analytical vs. Numerical Wind")
plt.show()
#Calculate the errors on u
#Calculate the relative error
windErr = np.zeros(len(_u))
relWindErr = np.zeros(len(_u))
    for x in xrange(0, len(windErr)):
        windErr[x] = abs(anal_u[x] - _u[x])
        # NB: anal_u is zero at the boundaries (sin = 0 there), so these
        # entries evaluate to inf/nan and numpy emits a divide warning
        relWindErr[x] = 100 * windErr[x] / anal_u[x]
plt.figure(8, figsize=(10,4))
plt.plot(_y,windErr)
plt.ylabel("absolute error")
plt.xlabel("Height (m)")
plt.title("Error of numerical solution of u")
plt.show()
plt.figure(9, figsize=(10,4))
plt.plot(_y,relWindErr)
plt.ylabel("relative error %")
plt.xlabel("Height (m)")
plt.title("Relative error of numerical solution of u")
plt.show()
def windCalc(_y, _P_prime, _f, _rho):
"""
    Calculates the geostrophic wind speed.
Input: _y : height
Input: _P_prime : pressure gradient
Input: _f : coriolis parameter
Input: _rho : density
    Variable: u : geostrophic wind
"""
u = np.zeros(len(_y))
coeff = -1 * 1/_rho * 1/_f
for x in xrange(0, len(_y)):
u[x] = coeff * _P_prime[x]
plt.figure(6, figsize=(10,4))
plt.plot(_y,u)
plt.ylabel("Wind Speed")
plt.xlabel("Height (m)")
plt.title("Geostrophic Wind Relation")
plt.show()
return u
def main():
"""
Main function for the programme.
    Param: P_a : pressure at the ground
    Param: P_b : amplitude of the pressure variation
    Param: f : Coriolis parameter
    Param: rho : density
    Param: L : characteristic length scale
    Param: N : number of grid intervals
    Param: y : heights of the grid points from ymin to ymax
    Variable: P : pressure
    Variable: P_prime : pressure gradient
    Variable: u : geostrophic wind
"""
#Define variables
P_a = 1e5
P_b = 200
f = 1e-4
rho = 1
L = 2.4e6
N = 10
ymin = 0
ymax = 1000000
y = range(ymin, ymax + int((ymax - ymin) / N), int((ymax - ymin) / N))
#Calculate the pressure as a function of height
P = pressureCalc(P_a, P_b, y, L)
#Calculate the gradient of the pressure
P_prime = pressureGradient(y,P,N)
#Calculate and plot the Geostrophic Wind Relation
u = windCalc(y, P_prime, f, rho)
    #Plot the difference between the analytical and numerical solutions for P'
errPlot(y,P_b,L,P_prime, u, rho, f)
'''
The error is bigger at the boundaries, where the finite difference formula
is only 1st order accurate, as opposed to 2nd.
An experiment to test this would be to measure the error relative to the
step size and see if it scales as the square of the order of accuracy.
(2) A more accurate differentiation scheme would be a 4th order accurate
central difference scheme.
'''
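# Illustrative sketch for point (2) above (added for reference; not part of the
# assignment code): a 4th-order accurate centred difference for interior points.
def pressureGradient4(_y, _P, N):
    """
    4th-order centred differences in the interior, using the same data layout
    as pressureGradient. Points next to the boundaries would still need
    lower-order or one-sided formulas, so only indices 2..len-3 are filled.
    """
    P_prime = np.zeros(len(_P))
    delta_y = (_y[len(_y) - 1] - _y[0]) / N
    for x in xrange(2, len(_y) - 2):
        P_prime[x] = (-_P[x + 2] + 8 * _P[x + 1] - 8 * _P[x - 1] + _P[x - 2]) \
                     / (12 * delta_y)
    return P_prime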
main()
| UTF-8 | Python | false | false | 7,703 | py | 1 | ass3.py | 1 | 0.588083 | 0.57549 | 0 | 248 | 29.060484 | 82 |
Syncano/syncano-python | 8,796,093,040,712 | 66c3544d7a5a9df12ec702c80c74c91874e89eea | 4a83828a1e230582e6d41321da8984674c34cea7 | /tests/test_incentives.py | 6b2b3f41c9b2c421bbf393ab157328a0d611a7d6 | []
| no_license | https://github.com/Syncano/syncano-python | af0b20cd8d79cf6139d989dab2b780d73218d4c3 | 3a1cff87a565a075ca6f54bfe55089bb152fdbf3 | refs/heads/develop | 2021-03-24T10:27:20.994762 | 2016-11-17T14:45:27 | 2016-11-17T14:45:27 | 16,406,574 | 4 | 4 | null | false | 2016-11-17T14:53:11 | 2014-01-31T11:46:19 | 2016-10-28T14:26:45 | 2016-11-17T14:53:11 | 12,305 | 5 | 4 | 5 | Python | null | null | # -*- coding: utf-8 -*-
import json
import unittest
from datetime import datetime
from syncano.exceptions import SyncanoValidationError
from syncano.models import ResponseTemplate, Script, ScriptEndpoint, ScriptEndpointTrace, ScriptTrace
try:
from unittest import mock
except ImportError:
import mock
class ScriptTestCase(unittest.TestCase):
def setUp(self):
self.model = Script()
@mock.patch('syncano.models.Script._get_connection')
def test_run(self, connection_mock):
model = Script(instance_name='test', id=10, links={'run': '/v1.1/instances/test/snippets/scripts/10/run/'})
connection_mock.return_value = connection_mock
connection_mock.request.return_value = {'id': 10}
self.assertFalse(connection_mock.called)
self.assertFalse(connection_mock.request.called)
result = model.run(a=1, b=2)
self.assertTrue(connection_mock.called)
self.assertTrue(connection_mock.request.called)
self.assertIsInstance(result, ScriptTrace)
connection_mock.assert_called_once_with(a=1, b=2)
call_args = connection_mock.request.call_args[0]
call_kwargs = connection_mock.request.call_args[1]
call_kwargs['data']['payload'] = json.loads(call_kwargs['data']['payload'])
self.assertEqual(('POST', '/v1.1/instances/test/snippets/scripts/10/run/'), call_args)
self.assertDictEqual(call_kwargs['data'], {'payload': {"a": 1, "b": 2}})
model = Script()
with self.assertRaises(SyncanoValidationError):
model.run()
class ScriptEndpointTestCase(unittest.TestCase):
def setUp(self):
self.model = ScriptEndpoint()
@mock.patch('syncano.models.ScriptEndpoint._get_connection')
def test_run(self, connection_mock):
model = ScriptEndpoint(instance_name='test', name='name',
links={'run': '/v1.1/instances/test/endpoints/scripts/name/run/'})
connection_mock.return_value = connection_mock
connection_mock.request.return_value = {
'status': 'success',
'duration': 937,
'result': {'stdout': 1, 'stderr': ''},
'executed_at': '2015-03-16T11:52:14.172830Z'
}
self.assertFalse(connection_mock.called)
self.assertFalse(connection_mock.request.called)
result = model.run(x=1, y=2)
self.assertTrue(connection_mock.called)
self.assertTrue(connection_mock.request.called)
self.assertIsInstance(result, ScriptEndpointTrace)
self.assertEqual(result.status, 'success')
self.assertEqual(result.duration, 937)
self.assertEqual(result.result, {'stdout': 1, 'stderr': ''})
self.assertIsInstance(result.executed_at, datetime)
connection_mock.assert_called_once_with(x=1, y=2)
connection_mock.request.assert_called_once_with(
'POST',
'/v1.1/instances/test/endpoints/scripts/name/run/',
data={"y": 2, "x": 1}
)
model = ScriptEndpoint()
with self.assertRaises(SyncanoValidationError):
model.run()
class ResponseTemplateTestCase(unittest.TestCase):
def setUp(self):
self.model = ResponseTemplate
@mock.patch('syncano.models.ResponseTemplate._get_connection')
def test_render(self, connection_mock):
model = self.model(instance_name='test', name='name',
links={'run': '/v1.1/instances/test/snippets/templates/name/render/'})
connection_mock.return_value = connection_mock
connection_mock.request.return_value = '<div>12345</div>'
self.assertFalse(connection_mock.called)
self.assertFalse(connection_mock.request.called)
response = model.render()
self.assertTrue(connection_mock.called)
self.assertTrue(connection_mock.request.called)
self.assertEqual(response, '<div>12345</div>')
connection_mock.request.assert_called_once_with(
'POST',
'/v1.1/instances/test/snippets/templates/name/render/',
data={'context': {}}
)
| UTF-8 | Python | false | false | 4,125 | py | 76 | test_incentives.py | 61 | 0.646545 | 0.628848 | 0 | 106 | 37.915094 | 115 |
kazishimulbillah/SeleniumPython | 17,523,466,580,856 | c54841fcb840d1a33b584b5fb99e12b1628e1c86 | 98561bb45e3c6f84ed55113dd1a9082b3ddf90f4 | /SampleProjects/POMDemo/Pages/LoginPage.py | 2047a63b09bf44b0ea4f437b564f69f9a21bed54 | []
| no_license | https://github.com/kazishimulbillah/SeleniumPython | 0440c12904c9571a83677a168b7b43c91c5e878b | 0e80fd153530529d63808ffa6d8c9cc88351c71e | refs/heads/master | 2023-03-12T12:45:52.044376 | 2021-03-02T05:19:05 | 2021-03-02T05:19:05 | 343,652,913 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
from selenium.webdriver.common.by import By
from SampleProjects.POMDemo.Locators.locators import Locators
class LoginPages:
def __init__(self, driver):
self.driver = driver
        # Locator values duplicated here for reference; the methods below read
        # them from the shared Locators class.
        self.login_Button = '//*[@id="js--main-header"]/div/div/div[3]/div/div[2]/a'
        self.user_name = "j_username"
        self.password = "j_password"
        self.sign_in_button = '//*[@id="loginForm"]/button'
def click_sign_in_button(self):
time.sleep(2)
self.driver.find_element(By.XPATH, Locators.login_Button).click()
def enter_user_name(self, username):
        self.driver.find_element_by_id(Locators.user_name).clear()  # clear any pre-filled text
time.sleep(2)
self.driver.find_element_by_id(Locators.user_name).send_keys(username)
def enter_password(self, password):
        self.driver.find_element_by_id(Locators.password).clear()  # clear any pre-filled text
time.sleep(2)
self.driver.find_element_by_id(Locators.password).send_keys(password)
def click_login_button(self):
time.sleep(2)
self.driver.find_element(By.XPATH, Locators.sign_in_button).click()
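# Usage sketch (illustrative; driver setup and credentials are placeholders):
#   from selenium import webdriver
#   driver = webdriver.Chrome()
#   page = LoginPages(driver)
#   page.click_sign_in_button()
#   page.enter_user_name("user@example.com")
#   page.enter_password("secret")
#   page.click_login_button()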
| UTF-8 | Python | false | false | 1,047 | py | 4 | LoginPage.py | 4 | 0.69341 | 0.687679 | 0 | 35 | 28.914286 | 84 |
cherylzh0110/AgeEstimationApp | 9,921,374,468,883 | 59f3df3282274b74e5d7cc2ee15695e426a78896 | 40e94229e05aeea9854bd17713707c22d704d9b8 | /utkcamera.py | 80f2ca1486a622b9c245e82019a4111a962e3276 | []
| no_license | https://github.com/cherylzh0110/AgeEstimationApp | 6a273004b0e6480e5bbcfe9cea5734c521c9a13e | c108f03b5acfba04c017d019780b2f48eef8af47 | refs/heads/master | 2022-12-22T07:41:44.649150 | 2020-09-15T11:37:53 | 2020-09-15T11:37:53 | 181,218,238 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
from keras.models import Sequential, load_model
import numpy as np
facec = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
def get_model():
global model
model = load_model('./models/utk.h5')
model._make_predict_function()
font = cv2.FONT_HERSHEY_SIMPLEX
print(" * Loading Keras model...")
get_model()
class VideoCamera(object):
def __init__(self):
self.video = cv2.VideoCapture(0)
def __del__(self):
self.video.release()
# returns camera frames along with bounding boxes and predictions
def get_frame(self):
success, fr = self.video.read()
faces = facec.detectMultiScale(fr, 1.3, 5)
for (x, y, w, h) in faces:
fc = fr[y:y+h, x:x+w]
roi = cv2.resize(fc, (64, 64))
roi = np.expand_dims(roi, axis=0)
#print(roi)
prediction1 = model.predict(roi) #[[0.00000e+00 0.00000e+00 2.03737e-20 1.00000e+00 0.00000e+00 0.00000e+000.00000e+00 0.00000e+00]]
#print(prediction1)
            # expected age = sum over the 101-bin softmax of (age * probability);
            # prediction1[1] is the model's age-distribution output
            ages = np.arange(0, 101).reshape(101, 1)
            predicted_ages = prediction1[1].dot(ages).flatten()
            #print(predicted_ages)
            pred = str(int(predicted_ages[0]))
#pred = str(a[0] * 1 + a[1] * 4.5 + a[2] * 10.5 + a[3] * 19 + a[4] * 29 + a[5] * 40 + a[6] * 53 + a[7] * 70)
print(pred)
#pred = model.predict(roi[np.newaxis, :, :, np.newaxis])
#print(pred) #[[0.00000e+00 0.00000e+00 2.03737e-20 1.00000e+00 0.00000e+00 0.00000e+000.00000e+00 0.00000e+00]]
#pred = str(a[0] * 1 + a[1] * 4.5 + a[2] * 10.5 + a[3] * 19 + a[4] * 29 + a[5] * 40 + a[6] * 53 + a[7] * 70)
cv2.rectangle(fr,(x,y),(x+w,y+h),(255,0,0),2)
cv2.putText(fr, str(pred), (250, 450), cv2.FONT_HERSHEY_COMPLEX,1, (0,0,255))
ret, jpeg = cv2.imencode('.jpg', fr)
        # OpenCV captures raw frames, so each frame is encoded to JPEG to correctly display the video stream
return jpeg.tobytes()
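# Usage sketch (illustrative Flask-style streaming; names are placeholders):
#   def gen(camera):
#       while True:
#           frame = camera.get_frame()
#           yield (b'--frame\r\n'
#                  b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
#   # Response(gen(VideoCamera()), mimetype='multipart/x-mixed-replace; boundary=frame')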
| UTF-8 | Python | false | false | 2,073 | py | 24 | utkcamera.py | 17 | 0.558128 | 0.449108 | 0 | 45 | 44.066667 | 144 |
jplobianco/bolao | 11,656,541,280,630 | cc1499f3c42755f592dec82e67126756b2b364ae | 3b07106c1148b23cba4889f754da2fc0bb33b157 | /principal/migrations/0004_auto_20190625_1853.py | 0319710091b257f31a865fbc5df11bbc82ae0165 | []
| no_license | https://github.com/jplobianco/bolao | 2fe130e1a50104f4d921bda0495df522c3d73608 | bc24aab6cb57ddabe88d26562962507854ad94e1 | refs/heads/master | 2020-03-21T22:56:54.142421 | 2019-10-23T16:19:31 | 2019-10-23T16:19:31 | 139,157,063 | 0 | 0 | null | false | 2020-07-22T13:48:34 | 2018-06-29T14:09:31 | 2020-07-22T13:46:46 | 2020-07-22T13:37:31 | 378 | 0 | 0 | 3 | Python | false | false | # Generated by Django 2.2.2 on 2019-06-25 18:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('principal', '0003_aposta_pontuacao_ganha'),
]
operations = [
migrations.AlterModelOptions(
name='bolao',
options={'ordering': ['nome'], 'verbose_name': 'Bolão', 'verbose_name_plural': 'Bolões'},
),
]
| UTF-8 | Python | false | false | 410 | py | 24 | 0004_auto_20190625_1853.py | 14 | 0.598039 | 0.551471 | 0 | 17 | 23 | 101 |
mrugesh-bannatwale/ninja_liota | 8,589,971,517 | df5c6de7312cb5430b658e8aae18315c53ab3221 | b7039d789868458f72f230b9110f74a992411714 | /liota/build/lib.linux-x86_64-2.7/liota/lib/transports/amqp.py | c56330038b322929ba3eea4afe6fe8f1df251771 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
]
| permissive | https://github.com/mrugesh-bannatwale/ninja_liota | e8304b07889f7426d94edb776d2a5064e109bfc1 | 3751f67960d1865995160281e0494ad41e22da0f | refs/heads/master | 2020-12-02T19:49:59.132124 | 2017-07-21T06:34:16 | 2017-07-21T06:34:16 | 96,396,099 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------#
# Copyright © 2015-2016 VMware, Inc. All Rights Reserved. #
# #
# Licensed under the BSD 2-Clause License (the “License”); you may not use #
# this file except in compliance with the License. #
# #
# The BSD 2-Clause License #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met:#
# #
# - Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"#
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #
# THE POSSIBILITY OF SUCH DAMAGE. #
# ----------------------------------------------------------------------------#
import logging
import os
import ssl
from numbers import Number
from threading import Thread
from kombu import Connection, Producer, Exchange, pools, binding
from kombu import Queue as KombuQueue
from kombu.pools import connections
from kombu.mixins import ConsumerMixin
from liota.lib.utilities.utility import systemUUID
log = logging.getLogger(__name__)
PROTOCOL_VERSION = "0.9.1"
# One connection for producer and one for consumer
CONNECTION_POOL_LIMIT = 2
pools.set_limit(CONNECTION_POOL_LIMIT)
EXCHANGE_TYPES = ["direct", "topic", "fanout", "headers"]
DEFAULT_EXCHANGE_TYPE = "topic"
DEFAULT_PUBLISH_PROPERTIES = {"content_type": "application/json",
"delivery_mode": 1,
"headers": None
}
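# Example (illustrative): callers can override the defaults per publish, e.g.
# for persistent delivery:
#   props = dict(DEFAULT_PUBLISH_PROPERTIES, delivery_mode=2)
#   amqp_client.publish("my.exchange", "my.key", message, properties=props)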
'''
Utility functions for AMQP
'''
def auto_generate_exchage_name(edge_system_name):
"""
Auto-generates exchange_name for a given edge_system_name
:param edge_system_name: EdgeSystemName
:return: Auto-generated exchange_name
"""
return "liota.exchange." + systemUUID().get_uuid(edge_system_name)
def auto_generate_routing_key(edge_system_name, for_publish=True):
"""
Auto-generates routing_key for publisher and consumer for a given edge_system_name
:param edge_system_name: EdgeSystemName
:param for_publish: True for publisher and False for consumer
:return: Auto-generated routing_key
"""
if for_publish:
return "liota." + systemUUID().get_uuid(edge_system_name) + ".request"
else:
return "liota." + systemUUID().get_uuid(edge_system_name) + ".response"
def auto_generate_queue_name(edge_system_name):
"""
Auto-generates queue_name for a given edge_system name
:param edge_system_name: EdgeSystemName
:return: Auto-generated queue_name
"""
return "liota.queue." + systemUUID().get_uuid(edge_system_name)
class Amqp:
"""
AMQP Protocol (version 0.9.1) implementation in LIOTA. It uses Python Kombu library.
"""
def __init__(self, url, port, identity=None, tls_conf=None, enable_authentication=False,
connection_timeout_sec=10):
"""
:param url: AMQP Broker URL or IP
:param port: port number of broker
:param identity: Identity Object
:param tls_conf: TLSConf object
:param bool enable_authentication: Enable username/password based authentication or not
:param connection_timeout_sec: Connection Timeout in seconds
"""
self.url = url
self.port = port
self.identity = identity
self.tls_conf = tls_conf
self.enable_authentication = enable_authentication
self.connection_timeout_sec = connection_timeout_sec
'''
From kombu's documentation:
This is a pool group, which means you give it a connection instance, and you get a pool instance back.
We have one pool per connection instance to support multiple connections in the same app.
All connection instances with the same connection parameters will get the same pool.
'''
self._connection_pool = None
'''
# Separate connection instances for publishing and consuming will be acquired from connection pool.
# In-case of AMQP broker(ex: RabbitMQ) overload, publishing connection can get blocked for some-time.
# So, having separate connection for publishing prevents consumers from being blocked.
'''
self._publisher_connection = None
self._consumer_connection = None
# Single channel for publishing to exchanges
self._publisher_channel = None
self._producer = None
self._consumer = None
self._init_or_re_init()
log.info("Initialized AMQP (version {0}) transports".format(PROTOCOL_VERSION))
def _init_or_re_init(self):
"""
Initialize or re-initialize connection pool and producer.
:return:
"""
self.disconnect()
self.connect_soc()
self._initialize_producer()
def _initialize_producer(self):
"""
Acquires connection from connection pool for Producer and initializes Producer.
:return:
"""
self._publisher_connection = self._connection_pool.acquire()
# Single publish_channel to publish all metrics from an edge_system
self._publisher_channel = self._publisher_connection.channel()
self._producer = Producer(self._publisher_channel)
def _initialize_consumer_connection(self):
"""
Acquires connection from connection pool for Consumer
:return:
"""
try:
self._consumer_connection = self._connection_pool.acquire()
except Exception:
log.exception("Exception while consume traceback...")
if self._consumer:
log.error("Consumer already started..")
raise Exception("Consumer already started..")
def connect_soc(self):
"""
Establishes connection with broker and initializes the connection pool
:return:
"""
# Use credentials if authentication is enabled
if self.enable_authentication:
if not self.identity.username:
log.error("Username not found")
raise ValueError("Username not found")
elif not self.identity.password:
log.error("Password not found")
raise ValueError("Password not found")
# TLS setup
if self.tls_conf:
# Validate CA certificate path
if self.identity.root_ca_cert:
if not (os.path.exists(self.identity.root_ca_cert)):
log.error("Error : Wrong CA certificate path.")
raise ValueError("Error : Wrong CA certificate path.")
else:
log.error("Error : Wrong CA certificate path.")
raise ValueError("Error : CA certificate path is missing")
# Validate client certificate path
if self.identity.cert_file:
if os.path.exists(self.identity.cert_file):
client_cert_available = True
else:
log.error("Error : Wrong client certificate path.")
raise ValueError("Error : Wrong client certificate path.")
else:
client_cert_available = False
# Validate client key file path
if self.identity.key_file:
if os.path.exists(self.identity.key_file):
client_key_available = True
else:
log.error("Error : Wrong client key path.")
raise ValueError("Error : Wrong client key path.")
else:
client_key_available = False
'''
Certificate Validations:
# 1. If client certificate is not present throw error
# 2. If client key is not present throw error
If both client certificate and keys are not available, proceed with root CA
'''
if not client_cert_available and client_key_available:
log.error("Error : Client key found, but client certificate not found")
raise ValueError("Error : Client key found, but client certificate not found")
if client_cert_available and not client_key_available:
log.error("Error : Client key found, but client certificate not found")
raise ValueError("Error : Client certificate found, but client key not found")
# Setup ssl options
ssl_details = {'ca_certs': self.identity.root_ca_cert,
'certfile': self.identity.cert_file,
'keyfile': self.identity.key_file,
'cert_reqs': getattr(ssl, self.tls_conf.cert_required),
'ssl_version': getattr(ssl, self.tls_conf.tls_version),
'ciphers': self.tls_conf.cipher}
try:
'''
Establish connection with one of the following:
a) Certificate based authorization
b) Certificate based authorization and username/password based authentication
c) Username/password based authentication
d) Plain AMQP
'''
amqp_connection = Connection(hostname=self.url, port=self.port, transport="pyamqp",
userid=self.identity.username if self.enable_authentication else None,
password=self.identity.password if self.enable_authentication else None,
ssl=ssl_details if self.tls_conf else False,
connect_timeout=self.connection_timeout_sec)
self._connection_pool = connections[amqp_connection]
except Exception, e:
log.exception("AMQP connection exception traceback..")
raise e
def declare_publish_exchange(self, pub_msg_attr):
"""
Declares an Exchange to which messages will be published.
:param pub_msg_attr: AmqpPublishMessagingAttributes
:return:
"""
if not isinstance(pub_msg_attr, AmqpPublishMessagingAttributes):
log.error("pub_msg_attr must be of type AmqpPublishMessagingAttributes")
raise TypeError("pub_msg_attr must be of type AmqpPublishMessagingAttributes")
exchange = Exchange(name=pub_msg_attr.exchange_name, type=pub_msg_attr.exchange_type)
# binding exchange with channel
exchange.durable = pub_msg_attr.exchange_durable
# delivery_mode at exchange level is transient
# However, publishers can publish messages with delivery_mode persistent
exchange.delivery_mode = 1
bound_exchange = exchange(self._publisher_channel)
# declaring exchange on broker
bound_exchange.declare()
pub_msg_attr.is_exchange_declared = True
log.info("Declared Exchange: Name: {0}, Type: {1}, Durability: {2}".
format(pub_msg_attr.exchange_name, pub_msg_attr.exchange_type, pub_msg_attr.exchange_durable))
def publish(self, exchange_name, routing_key, message, properties=DEFAULT_PUBLISH_PROPERTIES):
"""
        Publishes a message to the broker.
        :param exchange_name: Exchange name
        :type exchange_name: str or unicode
        :param routing_key: Routing key for binding
        :type routing_key: str or unicode
        :param message: Message to be published
        :type message: str or unicode
        :param properties: Publish properties (content_type, delivery_mode,
                           headers); defaults to DEFAULT_PUBLISH_PROPERTIES
        :return:
"""
try:
self._producer.publish(body=message, exchange=exchange_name, routing_key=routing_key,
content_type=properties['content_type'],
delivery_mode=properties['delivery_mode'],
headers=properties['headers'])
log.info("Published to exchange: {0} with routing-key: {1}".format(exchange_name, routing_key))
except Exception:
log.exception("AMQP publish exception traceback...")
def consume(self, consume_msg_attr_list):
"""
Starts ConsumerWorkerThread if not started already
:param consume_msg_attr_list: List of AmqpConsumeMessagingAttributes
:return:
"""
if not isinstance(consume_msg_attr_list, list):
log.error("consume_msg_attr_list must be of type list")
raise TypeError("consume_msg_attr_list must be of type list")
self._initialize_consumer_connection()
kombu_queues = []
callbacks = []
prefetch_size_list = []
prefetch_count_list = []
for consume_msg_attr in consume_msg_attr_list:
exchange = Exchange(name=consume_msg_attr.exchange_name, type=consume_msg_attr.exchange_type)
exchange.durable = consume_msg_attr.exchange_durable
# delivery_mode at exchange level is transient
# However, publishers can publish messages with delivery_mode persistent
exchange.delivery_mode = 1
if not 'headers' == consume_msg_attr.exchange_type:
kombu_queue = KombuQueue(name=consume_msg_attr.queue_name,
# A queue can be bound with an exchange with one or more routing keys
# creating a binding between exchange and routing_key
bindings=[binding(exchange=exchange, routing_key=_)
for _ in consume_msg_attr.routing_keys
]
)
else:
kombu_queue = KombuQueue(name=consume_msg_attr.queue_name,
exchange=exchange,
binding_arguments=consume_msg_attr.header_args
)
kombu_queue.durable = consume_msg_attr.queue_durable
kombu_queue.exclusive = consume_msg_attr.queue_exclusive
kombu_queue.auto_delete = consume_msg_attr.queue_auto_delete
kombu_queue.no_ack = consume_msg_attr.queue_no_ack
kombu_queues.append(kombu_queue)
callbacks.append(consume_msg_attr.callback)
prefetch_size_list.append(consume_msg_attr.prefetch_size)
prefetch_count_list.append(consume_msg_attr.prefetch_count)
self._consumer = ConsumerWorkerThread(self._consumer_connection, kombu_queues, callbacks,
prefetch_size_list, prefetch_count_list)
def disconnect_consumer(self):
"""
Stop consumer thread and disconnects consumer connection from Broker
:return:
"""
if self._consumer and self._consumer_connection:
self._consumer.stop()
self._consumer = None
self._consumer_connection.release()
self._consumer_connection = None
def disconnect_producer(self):
"""
Disconnects publisher connection from Broker
:return:
"""
if self._publisher_connection:
self._publisher_connection.release()
self._publisher_connection = None
self._producer = None
def disconnect(self):
"""
Disconnect client from broker
:return:
"""
try:
self.disconnect_producer()
self.disconnect_consumer()
pools.reset()
self._connection_pool = None
except Exception, e:
log.exception("AMQP disconnect exception traceback..")
raise e
class AmqpPublishMessagingAttributes:
"""
Encapsulates Messaging attributes related to AMQP Publish
"""
def __init__(self, edge_system_name=None, exchange_name=None, exchange_type=DEFAULT_EXCHANGE_TYPE,
exchange_durable=False, routing_key=None, msg_delivery_mode=1, header_args=None):
"""
:param edge_system_name: EdgeSystem Name.
If provided, exchange_name, routing_keys and queue_name will be auto-generated.
:param exchange_name: Exchange Name
:param exchange_type: Exchange Type
Supported types are: "direct", "topic", "fanout", "headers"
:param exchange_durable: Exchange durable or not
:param routing_key: Used when exchange type is one of "direct", "topic", "fanout"
Routing Key based on which a particular message should be routed.
:param msg_delivery_mode: 1 -> transient
2 -> persistent
:param header_args: Used when exchange_type is 'headers'
Must be of type dict. Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument named “x-match” determines the
matching algorithm, where “all” implies an AND (all pairs must match) and “any” implies OR
(at least one pair must match).
"""
if edge_system_name:
self.exchange_name = auto_generate_exchage_name(edge_system_name)
self.routing_key = auto_generate_routing_key(edge_system_name, for_publish=True)
log.info("Auto-generated exchange_name: {0} and routing_key: {1}".
format(self.exchange_name, self.routing_key))
else:
# routing_key can be None for exchange of type 'headers'
if not 'headers' == exchange_type and routing_key is None:
log.error("routing_key must be non empty character sequence for exchange types other than 'headers'")
raise ValueError("routing_key must be non empty character sequence"
" for exchange types other than 'headers'")
self.exchange_name = exchange_name
self.routing_key = routing_key
if exchange_type not in EXCHANGE_TYPES:
log.error("Unsupported exchange-type: {0}".format(str(exchange_type)))
raise TypeError("Unsupported exchange-type: {0}".format(str(exchange_type)))
if 'headers' == exchange_type and not isinstance(header_args, dict):
log.error("For exchange_type `headers`, header_args must be of type dict")
raise ValueError("For exchange_type `headers`, header_args must be of type dict")
if not isinstance(msg_delivery_mode, Number) or msg_delivery_mode not in range(1, 3):
log.error("msg_delivery_mode must be a Number (1 or 2)")
raise ValueError("msg_delivery_mode must be a Number (1 or 2)")
self.exchange_type = exchange_type
# Exchange should survive broker reboot or not
self.exchange_durable = exchange_durable
# Exchange declared or not
self.is_exchange_declared = False
if self.exchange_name is None:
# Since exchange_name is None, exchange declared at edge_system level will be used.
# Hence, marking exchange as declared.
# Mode 2: Single exchange and different routing-keys for metrics published from an edge_system
log.warn("exchange_name is None. exchange declared at edge_system level will be used")
self.is_exchange_declared = True
self.properties = DEFAULT_PUBLISH_PROPERTIES
if 2 == msg_delivery_mode:
self.properties["delivery_mode"] = 2
if header_args is not None:
self.properties["headers"] = header_args
class AmqpConsumeMessagingAttributes:
"""
Encapsulates Messaging attributes related to AMQP Consume
"""
def __init__(self, edge_system_name=None, exchange_name=None, exchange_type=DEFAULT_EXCHANGE_TYPE,
exchange_durable=False, queue_name=None, queue_durable=False, queue_auto_delete=True,
queue_exclusive=False, routing_keys=None, queue_no_ack=False, prefetch_size=0, prefetch_count=0,
callback=None, header_args=None):
"""
:param edge_system_name: EdgeSystem Name.
If provided, exchange_name, routing_keys and queue_name will be auto-generated.
:param exchange_name: Exchange Name
:param exchange_type: Exchange Type
Supported types are: "direct", "topic", "fanout", "headers"
:param exchange_durable: Exchange durable or not
:param queue_name: Queue Name.
Will be auto-generated if edge_system_name is provided. If edge_system_name is not provided,
users have choice to provide their own queue name or leave it to the AMQP broker to assign
and auto-generated name
:param queue_durable: Queue durable or not.
:param queue_auto_delete: Queue auto-delete or not.
:param queue_exclusive: Queue exclusive or not
:param routing_keys: List of routing keys.
A queue can be bound with an exchange with one or more routing keys
Used when exchange type is one of "direct", "topic", "fanout".
:param queue_no_ack: Queue should expect ACK or not
:param prefetch_size: Specify the prefetch window in octets. The server will send a message in advance if it is
equal to or smaller in size than the available prefetch size (and also falls within other
prefetch limits). May be set to zero, meaning “no specific limit”, although other prefetch
limits may still apply.
:param prefetch_count: Specify the prefetch window in terms of whole messages
:param callback: Callback method to be invoked.
Method's signature must be method(body, message)
body -> message body
message -> kombu Message object
:param header_args: Used when exchange_type is 'headers'
Must be of type dict. Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument named “x-match” determines the
matching algorithm, where “all” implies an AND (all pairs must match) and “any” implies OR
(at least one pair must match).
"""
if edge_system_name:
self.exchange_name = auto_generate_exchage_name(edge_system_name)
self.routing_keys = [auto_generate_routing_key(edge_system_name, for_publish=False)]
self.queue_name = auto_generate_queue_name(edge_system_name)
log.info("Auto-generated exchange_name: {0}, routing_keys: {1} and queue_name: {2}".
format(self.exchange_name, str(self.routing_keys), self.queue_name))
else:
if not 'headers' == exchange_type and routing_keys is None:
log.error("routing_key must be non empty character sequence for exchange types other than 'headers'")
raise ValueError("routing_key must be non empty character sequence"
" for exchange types other than 'headers'")
if 'headers' == exchange_type and not isinstance(header_args, dict):
log.error("For exchange_type `headers`, header_args must be of type dict")
raise ValueError("For exchange_type `headers`, header_args must be of type dict")
routing_keys = routing_keys if routing_keys else []
if not isinstance(routing_keys, list):
log.error("routing_keys must be of type list")
raise TypeError("routing_keys must be of type list")
self.exchange_name = exchange_name
self.routing_keys = routing_keys
self.queue_name = queue_name
if exchange_type not in EXCHANGE_TYPES:
log.error("Unsupported exchange-type: {0}".format(str(exchange_type)))
raise TypeError("Unsupported exchange-type: {0}".format(str(exchange_type)))
self.exchange_type = exchange_type
self.exchange_durable = exchange_durable
self.queue_durable = queue_durable
self.queue_auto_delete = queue_auto_delete
self.queue_exclusive = queue_exclusive
self.queue_no_ack = queue_no_ack
self.prefetch_size = prefetch_size
self.prefetch_count = prefetch_count
self.callback = callback
self.header_args = header_args
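# Usage sketch (illustrative names; `amqp_client` is an Amqp instance as above):
#   def on_msg(body, message):
#       log.info("received: %s", body)
#       message.ack()
#   attrs = AmqpConsumeMessagingAttributes(exchange_name="demo.exchange",
#                                          exchange_type="topic",
#                                          queue_name="demo.queue",
#                                          routing_keys=["demo.#"],
#                                          callback=on_msg)
#   amqp_client.consume([attrs])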
class AmqpConsumerWorker(ConsumerMixin):
"""
Implementation of Kombu's ConsumerMixin class.
ConsumerMixin.run() is a blocking call and should be invoked from a separate thread.
"""
def __init__(self, connection, queues, callbacks, prefetch_size_list, prefetch_count_list):
"""
:param connection: Kombu Connection object
:param queues: list of Queues to consume from
:param callbacks: list of callbacks for corresponding queues
:param prefetch_size_list: list of prefetch_size for Consumers that consume from corresponding queues
:param prefetch_count_list: list of prefetch_count for Consumers that consume from corresponding queues
"""
if not isinstance(connection, Connection):
log.error("connection must be of type: {0}".format(str(type(Connection))))
raise TypeError("connection must be of type: {0}".format(str(type(Connection))))
if not isinstance(queues, list) or not isinstance(callbacks, list):
log.error("queues and connections must be of type list")
raise TypeError("queues and connections must be of type list")
# ConsumerMixin class expects 'connection' attribute
self.connection = connection
self.queues = queues
self.callbacks = callbacks if len(callbacks) > 0 else [self.on_message]
self.prefetch_size_list = prefetch_size_list
self.prefetch_count_list = prefetch_count_list
def get_consumers(self, Consumer, channel):
"""
Implementation of get_consumers() of ConsumerMixin class.
This method is invoked by ConsumerMixin's internal methods.
:param Consumer: kombu.Consumer
:param channel: kombu.Channel
:return: list of :class:`kombu.Consumer` instances to use.
"""
kombu_consumer_list = []
for _ in range(0, len(self.queues)):
# consumer class expects queues, callbacks and accept as list
kombu_consumer = Consumer(queues=[self.queues[_]],
# making self.on_message() as callback if callback is None
callbacks=[self.callbacks[_]] if self.callbacks[_] else [self.on_message],
accept=['json', 'pickle', 'msgpack', 'yaml']
)
kombu_consumer.qos(prefetch_size=self.prefetch_size_list[_],
prefetch_count=self.prefetch_count_list[_],
apply_global=False
)
kombu_consumer_list.append(kombu_consumer)
return kombu_consumer_list
def on_message(self, body, message):
"""
Default callback method for AMQP Consumers. It simply logs the message and sends ACK
This callback will be used if user doesn't provide any callback
:param body: message body
:param message: Kombu Message object.
:return:
"""
log.info('Got message: {0}'.format(body))
message.ack()
class ConsumerWorkerThread(Thread):
"""
WorkerThread for AmqpConsumerWorker.
This worker cannot inherit from both Thread and ConsumerMixin class because of ConsumerMixin's run().
"""
def __init__(self, connection, queues, callbacks, prefetch_size_list, prefetch_count_list):
"""
:param connection: Kombu Connection object
:param queues: list of Queues to consume from
:param callbacks: list of callbacks for corresponding queues
:param prefetch_size_list: list of prefetch_size for Consumers that consume from corresponding queues
:param prefetch_count_list: list of prefetch_count for Consumers that consume from corresponding queues
"""
Thread.__init__(self)
self._connection = connection
self._queues = queues
self._callbacks = callbacks
self._prefetch_size_list = prefetch_size_list
self._prefetch_count_list = prefetch_count_list
self.daemon = True
self._consumer = None
self.start()
def run(self):
"""
run() for ConsumerWorkerThread. This initializes AmqpConsumerWorker and invokes its run() method.
This method returns when AmqpConsumerWorker.should_stop is set to True
:return:
"""
try:
self._consumer = AmqpConsumerWorker(self._connection, self._queues, self._callbacks,
self._prefetch_size_list, self._prefetch_count_list)
            log.info("Starting AmqpConsumerWorker...")
            self._consumer.run()  # blocks here until should_stop is set
except Exception:
log.exception("Exception traceback in ConsumerWorkerThread...")
self.stop()
def stop(self):
"""
Stop ConsumerWorkerThread, AmqpConsumerWorker and its associated Consumers
:return:
"""
# Stops AmqpConsumerWorker and its associated Consumers
if self._consumer:
self._consumer.should_stop = True
log.info("Stopped AmqpConsumerWorker..")
else:
log.info("AmqpConsumerWorker is already stopped..")
| UTF-8 | Python | false | false | 31,875 | py | 19 | amqp.py | 14 | 0.595 | 0.593147 | 0 | 684 | 45.55117 | 120 |
weiju/c64-stuff | 2,757,369,006,517 | 79dd4c9d68dec72372dbfaa252a8030a385a7cd4 | cd0cafdf281e808abadfefa1fc3306bf90cbce3f | /d64.py | fa2c60bef733dd7be96981489bc02a8ea713b202 | []
| no_license | https://github.com/weiju/c64-stuff | 554b5548d59cf75436b4326f939f98501d2d6824 | 419e4648011eb3c687260dbd798a730baacb3ae9 | refs/heads/master | 2016-09-16T11:46:55.822562 | 2015-07-10T04:45:16 | 2015-07-10T04:45:16 | 35,858,088 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import argparse
BLOCK_SIZE = 256
FILETYPES = ['DEL', 'SEQ', 'PRG', 'USR', 'REL']
def num_sectors(track):
"""For a 40 track disk, return the number of sectors in the specified track"""
if track > 0 and track <= 17:
return 21
elif track >= 18 and track <= 24:
return 19
elif track >= 25 and track <= 30:
return 18
elif track >= 31 and track <= 40:
return 17
else:
raise Exception("invalid track number '%d'" % track)
def track_offset(track):
cumsum_sectors = sum([num_sectors(i) for i in range(1, track)])
return cumsum_sectors * BLOCK_SIZE
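# Worked example: track 18 (the directory track) is preceded by tracks 1-17,
# each with 21 sectors, so track_offset(18) = 17 * 21 * 256 = 91392 = 0x16500,
# the usual directory offset in a .d64 image.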
def read_block(data, track, sector):
offset = track_offset(track) + sector * BLOCK_SIZE
return data[offset:offset + BLOCK_SIZE]
def rem_pad_bytes(petscii_string):
"""shorten the name to exclude the pad characters"""
    for i in range(len(petscii_string)):
        if petscii_string[i] == 0xa0:
            return petscii_string[:i]  # cut at the first 0xa0 pad byte
    return petscii_string
def read_dir_block(block):
offset = 0
for i in range(8):
filetype = block[offset + 2]
actual_filetype = filetype & 7
if (actual_filetype) != 0:
fname = block[offset + 5: offset + 0x15]
fname = rem_pad_bytes(fname)
fname = fname.decode('utf-8')
file_track, file_sector = block[offset + 3], block[offset + 4]
size_hi, size_lo = block[offset + 0x1f], block[offset + 0x1e]
num_sectors = size_hi * 256 + size_lo
typename = FILETYPES[actual_filetype]
print("%s[%02x]\t'%s'\t\t# sectors: %d\t-> (%d, %d)" % (typename, filetype,
fname,
num_sectors,
file_track, file_sector))
offset += 0x20
def read_directory(data):
"""
for i in range(0, 19):
dir_block = read_block(data, 18, i)
print("\n\nDIR BLOCK %d" % i)
print(dir_block)
"""
next_track = 18
next_sector = 1
while next_track != 0:
print("reading track = %d sector = %d" % (next_track, next_sector))
dir_block = read_block(data, next_track, next_sector)
next_track, next_sector = dir_block[0], dir_block[1]
read_dir_block(dir_block)
def read_bam(data):
bam_block = read_block(data, 18, 0)
disk_dos_version = bam_block[2]
bam_entries = bam_block[0x04:0x90]
disk_name = rem_pad_bytes(bam_block[0x90:0xa0]).decode('utf-8')
print("DISK NAME '%s', DOS version: $%02x" % (disk_name, disk_dos_version))
def parse_d64_file(path):
with open(path, 'rb') as infile:
data = infile.read()
print("# read: ", len(data))
read_bam(data)
read_directory(data)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='d64.py - disk image parser')
parser.add_argument('d64file', help='d64 format file')
args = parser.parse_args()
parse_d64_file(args.d64file)
| UTF-8 | Python | false | false | 3,163 | py | 8 | d64.py | 2 | 0.554221 | 0.523237 | 0 | 96 | 31.9375 | 88 |
nextstrain/cli | 3,023,657,004,162 | 573083794d785ce759adeb05d045e1a4427fb120 | 8ea3d5b0ad92fd3ed8ed0678f317cf3dc2a914da | /nextstrain/cli/command/login.py | 25f58e45340045fa5ecedc71f480c378a5de653f | [
"MIT",
"BSD-2-Clause",
"Apache-2.0"
]
| permissive | https://github.com/nextstrain/cli | 0ed57c4cd0fe75eac4f68b7830f8b661b713858b | 3c93e87c2ac6bdd1b6a913070f709067b29a6cc1 | refs/heads/master | 2023-09-02T13:46:08.170498 | 2023-08-31T17:07:44 | 2023-08-31T17:07:44 | 139,047,738 | 26 | 22 | MIT | false | 2023-09-14T20:35:23 | 2018-06-28T17:13:28 | 2023-05-28T14:15:05 | 2023-09-14T20:35:22 | 1,261 | 25 | 20 | 61 | Python | false | false | """
Log into Nextstrain.org and save credentials for later use.
The first time you log in, you'll be prompted for your Nextstrain.org username
and password. After that, locally-saved authentication tokens will be used and
automatically renewed as needed when you run other `nextstrain` commands
requiring log in. You can also re-run this `nextstrain login` command to force
renewal if you want. You'll only be prompted for your username and password if
the locally-saved tokens are unable to be renewed or missing entirely.
If you log out of Nextstrain.org on other devices/clients (like your web
browser), you may be prompted to re-enter your username and password by this
command sooner than usual.
Your password itself is never saved locally.
"""
from functools import partial
from getpass import getpass
from inspect import cleandoc
from os import environ
from ..authn import current_user, login, renew
from ..errors import UserError
getuser = partial(input, "Username: ")
def register_parser(subparser):
parser = subparser.add_parser("login", help = "Log into Nextstrain.org")
parser.add_argument(
"--username", "-u",
metavar = "<name>",
help = "The username to log in as. If not provided, the :envvar:`NEXTSTRAIN_USERNAME`"
" environment variable will be used if available, otherwise you'll be"
" prompted to enter your username.",
default = environ.get("NEXTSTRAIN_USERNAME"))
parser.add_argument(
"--no-prompt",
help = "Never prompt for a username/password;"
" succeed only if there are login credentials in the environment or"
" existing valid/renewable tokens saved locally, otherwise error. "
" Useful for scripting.",
action = 'store_true')
parser.add_argument(
"--renew",
help = "Renew existing tokens, if possible. "
" Useful to refresh group membership information (for example) sooner"
" than the tokens would normally be renewed.",
action = "store_true")
parser.epilog = cleandoc("""
For automation purposes, you may opt to provide environment variables instead
of interactive input and/or command-line options:
.. envvar:: NEXTSTRAIN_USERNAME
Username on nextstrain.org. Ignored if :option:`--username` is also
provided.
.. envvar:: NEXTSTRAIN_PASSWORD
Password for nextstrain.org user. Required if :option:`--no-prompt` is
used without existing valid/renewable tokens.
""")
return parser
def run(opts):
if opts.renew:
user = renew()
if not user:
raise UserError("Renewal failed or not possible. Please login again.")
return
user = current_user()
if not user:
username = opts.username
password = environ.get("NEXTSTRAIN_PASSWORD")
if opts.no_prompt and (username is None or password is None):
raise UserError("No Nextstrain.org credentials found and --no-prompt prevents interactive login.")
print("Logging into Nextstrain.org…")
print()
if username is not None:
print(f"Username: {username}")
else:
username = prompt(getuser)
if password is not None:
print("Password: (from environment)")
else:
password = prompt(getpass)
print()
user = login(username, password)
print()
else:
if opts.username is not None and opts.username != user.username:
raise UserError(f"""
Login requested for {opts.username}, but {user.username} is already logged in.
Please logout first if you want to switch users.
""")
print(f"Logged into nextstrain.org as {user.username}.")
print("Log out with `nextstrain logout`.")
def prompt(prompter):
try:
return prompter()
except (EOFError, KeyboardInterrupt):
print()
raise UserError("Aborted by user input")
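# Example (sketch, not part of this module): wiring these functions into a
# bare argparse parser for a quick test; the real `nextstrain` entry point
# performs this registration itself.
# import argparse
# parser = argparse.ArgumentParser()
# register_parser(parser.add_subparsers())
# run(parser.parse_args(["login", "--renew"]))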
| UTF-8 | Python | false | false | 4,147 | py | 132 | login.py | 75 | 0.637636 | 0.637636 | 0 | 122 | 32.97541 | 110 |
byungjur96/Algorithm | 8,615,704,406,988 | b70223d7ae6a31054602755da231ec6bb4aedb38 | e335514159ccc0792abda335de2c338946ec4e1e | /11728.py | 171adeb94b9df107e0ab5a88ea1cd6cc5c192eac | []
| no_license | https://github.com/byungjur96/Algorithm | 34455c18e2c5f9fe07de4676af0c1b674ebe4e14 | 17440744d6be1d1fb2879865c15b170883098f53 | refs/heads/master | 2022-07-17T04:16:53.875357 | 2022-07-05T09:23:27 | 2022-07-05T09:23:27 | 167,967,581 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n, m = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
a.sort()
b.sort()
result = []
a_idx = 0
b_idx = 0
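# Two-pointer merge of the two sorted lists in O(n+m): one merge step of merge sort.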
while a_idx < n or b_idx < m:
if a_idx == n:
result.append(b[b_idx])
b_idx += 1
elif b_idx == m:
result.append(a[a_idx])
a_idx += 1
elif a[a_idx] < b[b_idx]:
result.append(a[a_idx])
a_idx += 1
    else:
result.append(b[b_idx])
b_idx += 1
print(" ".join(list(map(str, result)))) | UTF-8 | Python | false | false | 533 | py | 220 | 11728.py | 219 | 0.487805 | 0.476548 | 0 | 26 | 19.538462 | 39 |
yuhao0925/toutiao | 17,961,553,267,940 | 6795c3e112df33d7c5a1acc378c96d3632bba35e | 3b6e4fb38214bdd8f5cccede3fc993d897ff8455 | /common/cache/constants.py | 72ff8a9312b8ab850dae4939a4e437de116d1433 | []
| no_license | https://github.com/yuhao0925/toutiao | 09c18c71ecada2f131323d79b77ea05fd1e25061 | 695af80792ca8f9daa2e35e57fd57ef24f0e30e4 | refs/heads/master | 2022-12-11T08:54:57.225568 | 2019-12-18T12:59:18 | 2019-12-18T12:59:18 | 228,839,894 | 0 | 0 | null | false | 2022-09-23T22:32:39 | 2019-12-18T12:54:07 | 2019-12-18T12:59:42 | 2022-09-23T22:32:38 | 1,927 | 0 | 0 | 10 | Python | false | false | import random
class BaseCacheTTL():
    # Cache TTL in seconds; randomized to guard against cache avalanche
    TTL = 60 * 60 * 2  # base expiry time
    MAX_DELTA = 60 * 30  # upper bound of the random offset
    # A @classmethod needs no self; its first parameter is cls, the class itself.
@classmethod
def get_TTL(cls):
        # Return a random value within the validity window
return cls.TTL + random.randrange(0,cls.MAX_DELTA)
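# Example (sketch): UserCacheTTL.get_TTL() returns a value in [7200, 9000)
# seconds, so keys written at the same moment expire at scattered times.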
# To prevent cache avalanche, different kinds of data get different TTL
# schemes, hence the use of inheritance.
class UserCacheTTL(BaseCacheTTL):
    # expiry time for cached user-profile data
pass
# 5-10 minute TTL for users that do not exist, to guard against cache penetration
class UserNotExistCacheTTL(BaseCacheTTL):
    TTL = 60 * 5  # base expiry time
    MAX_DELTA = 60 * 5  # upper bound of the random offset | UTF-8 | Python | true | false | 852 | py | 30 | constants.py | 29 | 0.672794 | 0.637868 | 0 | 24 | 21.708333 | 58 |
MalcolmChen97/CS113-Networking-Assignments | 13,821,204,779,779 | 04d2c676f838b4d33af44d01f88ee0e914f89d98 | fa709028dd41abd61836a9ba71fc1424f1d793e6 | /hw2/code/smtp_client.py | 0d2598162e050ec7f4845c265c90c121ee606a16 | []
| no_license | https://github.com/MalcolmChen97/CS113-Networking-Assignments | 2ebbc5e6229aff080d8d42a915cbb2fa7a73f79a | bd930e07663d7ec6541c34a04604adefd78d8ddf | refs/heads/master | 2021-07-22T10:50:50.261150 | 2017-10-31T05:44:26 | 2017-10-31T05:44:26 | 108,331,121 | 0 | 1 | null | false | 2017-10-31T07:13:12 | 2017-10-25T22:04:37 | 2017-10-26T03:18:01 | 2017-10-31T05:44:40 | 5,968 | 0 | 1 | 1 | Python | false | null | from socket import *
msg= "\r\n I love computer networks!"
endmsg = "\r\n.\r\n"
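# Minimal SMTP conversation by hand: HELO, MAIL FROM, RCPT TO, DATA, QUIT.
# Expected RFC 5321 reply codes: 220 greeting, 250 OK, 354 start mail input,
# 221 closing connection.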
#connection
mailserver = "localhost"
clientsocket = socket(AF_INET,SOCK_STREAM)
clientsocket.connect((mailserver,25))
recv = clientsocket.recv(1024).decode()
print(recv)
if recv[:3] != '220':
print('220 reply not received from server.')
#helo
heloCommand = 'HELO crystalcove.com\r\n'
clientsocket.send(heloCommand.encode())
recv1 = clientsocket.recv(1024).decode()
print(recv1)
if recv1[:3] != '250':
print('250 reply not received from server.')
#MAIL FROM
while True:
mail_addr = input("Email: ")
mail_addr_command_version = '<' + mail_addr + '>'
mailfromCommand = 'MAIL FROM: ' + mail_addr_command_version +'\r\n'
clientsocket.send(mailfromCommand.encode())
recv2 = clientsocket.recv(1024).decode()
print(recv2)
if recv2[:3] != '250':
print('invalid mail address')
continue
break
#RCPT TO
while True:
mail_addr = input('Recipient Email: ')
mail_addr_command_version = '<' + mail_addr + '>'
rcpttoCommand = 'RCPT TO: ' + mail_addr_command_version +'\r\n'
clientsocket.send(rcpttoCommand.encode())
recv3 = clientsocket.recv(1024).decode()
print(recv3)
if recv3[:3] != '250':
        print('invalid recipient address')
continue
break
#DATA
dataCommand = 'DATA\r\n'
clientsocket.send(dataCommand.encode())
recv4 = clientsocket.recv(1024).decode()
print(recv4)
#Send message
clientsocket.send(msg.encode())
clientsocket.send(endmsg.encode())
# read the server's "250 OK" reply for the message body before quitting
recv5 = clientsocket.recv(1024).decode()
print(recv5)
#QUIT
quitCommand = 'QUIT\r\n'
clientsocket.send(quitCommand.encode())
recvq = clientsocket.recv(1024).decode()
print(recvq)
clientsocket.close()
| UTF-8 | Python | false | false | 1,641 | py | 5 | smtp_client.py | 4 | 0.687995 | 0.652041 | 0 | 111 | 13.783784 | 69 |
justicarxxvi8/PycharmProjects | 19,078,244,768,569 | ad59f0d1497d6c30e097edce5cd3e32ba47406d4 | 9dba9fc1c6258877937a3473e36621f934c4e814 | /sandbox/palindrome.py | 5868e55af7b165d738f01dcbac865c4b374a7e4b | []
| no_license | https://github.com/justicarxxvi8/PycharmProjects | bb9c31ded99c10e58e229591ae6992fa1c717a97 | f8d6a55e8d84307e37bc34ca33131f9ab87879a2 | refs/heads/master | 2021-08-20T07:17:29.207085 | 2017-11-28T14:08:07 | 2017-11-28T14:08:07 | 112,343,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def checkPalindrome(inputString):
reversed_string = inputString[::-1]
if reversed_string == inputString:
return True
    else:
        return False
print(checkPalindrome("Fad"))  # False: "Fad" reversed is "daF"
| UTF-8 | Python | false | false | 225 | py | 25 | palindrome.py | 23 | 0.666667 | 0.662222 | 0 | 10 | 21.5 | 40 |
Star-Coder-7/Functions | 8,280,696,949,298 | 95355e2e01b77fd1debfddbdd797ac5a4640a5a2 | 031fc4d181aefb3d086e3b2ce8577c5dd8d43506 | /parameter_types.py | b43f032a291bee08ebdbad69b6300cbfd7249f22 | []
| no_license | https://github.com/Star-Coder-7/Functions | 49d945985cdbf4f8faa69c6ba4a16c4fba6b896e | f7ee3c7971d16fe3d8091623cbcd46b9f6bd635b | refs/heads/main | 2023-03-11T02:07:46.724803 | 2021-02-28T21:40:47 | 2021-02-28T21:40:47 | 343,221,152 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def func(p1, p2, *args, k, **kwargs):
print(f"positional-or-keyword:....{p1}, {p2}")
print(f"var_positional (*args):...{args}")
print(f"keyword:..................{k}")
print(f"var_keyword:...............{kwargs}")
func(1, 2, 3, 4, 5, 9, k=6, key1=7, key2=8)
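# Expected output of the call above:
# positional-or-keyword:....1, 2
# var_positional (*args):...(3, 4, 5, 9)
# keyword:..................6
# var_keyword:...............{'key1': 7, 'key2': 8}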
| UTF-8 | Python | false | false | 276 | py | 13 | parameter_types.py | 12 | 0.492754 | 0.438406 | 0 | 8 | 33.5 | 50 |
Roberto09/Curso-MAEs-Python | 10,995,116,309,763 | ccd277f20b2445cac0b7ea82f2473936cc55c4e7 | 3209d15715b3591649c9bd671d4b4bf77a8b251a | /Sesion1Clase.py | 96cc8d8ca073e14e0f525971d9bf56b260c40138 | []
| no_license | https://github.com/Roberto09/Curso-MAEs-Python | d0adbcdbf1d1097aa686b48acfe618ea76ffc4f9 | 0ababb3a1c62d5485ac3fa677f50e4471b0e3374 | refs/heads/master | 2021-05-20T11:07:39.558290 | 2020-05-21T04:29:24 | 2020-05-21T04:29:24 | 252,266,780 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
def formula_cuadratica(x, y, z):
p = (-y + math.sqrt(pow(y, 2) - 4 * x * z))/(2*x)
r = (-y - math.sqrt(pow(y, 2) - 4 * x * z))/(2*x)
return p, r
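# Example: formula_cuadratica(1, 4, -5) returns (1.0, -5.0), the two roots
# of x**2 + 4*x - 5 = 0.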
def main():
x = 10.1234
print(round(x, 2))
"""
    # This is a constant
    NOMBRE_ENTRENADOR = input("Tell me your name, trainer! ")
    # This is a variable
    edad_entrenador = int(input("Tell me your age: "))
    print("Hello ", NOMBRE_ENTRENADOR, " you are ", edad_entrenador, " years old!")
    print("It is time to help Pikachu!")
    # Quadratic formula: (-b +- sqrt(pow(b, 2) - 4 * a * c)) / (2*a)
    a = float(input("Give me a: "))
    b = float(input("Give me b: "))
    c = float(input("Give me c: "))
    x1, x2 = formula_cuadratica(a, b, c)
    print("Pikachu must attack at times ", round(x1, 2), " and ", round(x2, 2), " !")
"""
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 926 | py | 8 | Sesion1Clase.py | 6 | 0.501622 | 0.477838 | 0 | 38 | 23.236842 | 90 |
naveens239/cpm_django | 9,826,885,214,283 | 1f9d6b1a61ffcc748fa5392514d304d645d0a042 | ae9cf120a2d5e7373cdef45d33f22609e0d0ab69 | /src/cpm/migrations/0007_auto_20160307_1407.py | 51dca8b26e2ec9bda7c2b79f16f68943370054bb | []
| no_license | https://github.com/naveens239/cpm_django | 1c285e7ca2ee2426b17c9a518f13e08a53c4f6c0 | 3cb7f42945b02af929e9f249935a23b1f9ee362d | refs/heads/master | 2021-01-20T19:50:01.301086 | 2017-10-13T17:59:24 | 2017-10-13T17:59:24 | 52,895,664 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-07 08:37
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cpm', '0006_auto_20160307_0328'),
]
operations = [
migrations.AlterField(
model_name='stagesetting',
name='total_true_checks',
field=models.IntegerField(validators=[django.core.validators.MaxValueValidator(25), django.core.validators.MinValueValidator(0)]),
),
]
| UTF-8 | Python | false | false | 585 | py | 72 | 0007_auto_20160307_1407.py | 42 | 0.654701 | 0.594872 | 0 | 21 | 26.857143 | 142 |
CatLassie/MLex1 | 5,927,054,874,463 | dac1b6d4e8552317ae93d2279eeb81d003a4fa98 | 09df5b16f9e9d9b759e76e605c4e7a8893a9a299 | /Proj1/TGaussianNB.py | df26c32efc7b8b58f51a146c0de8b67745a4b3c4 | []
| no_license | https://github.com/CatLassie/MLex1 | 463576ff2334ca89c0afb8b54db53342e01700aa | 7c8c3c7f6962a463051ab0c8d5654c63e34d8992 | refs/heads/master | 2021-09-05T21:03:32.217066 | 2017-11-27T22:41:07 | 2017-11-27T22:41:07 | 111,956,938 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on Nov 22, 2017
@author: martin
'''
from Test import Test
from sklearn.naive_bayes import GaussianNB
class TGaussianNB(Test):
    '''
    Gaussian Naive Bayes classifier wrapped in the generic Test harness.
    '''
    def train(self):
        gnb = GaussianNB()
        # fit the Gaussian Naive Bayes model on the training split
        self.model = gnb.fit(self.train_x, self.train_y)
    def predict(self):
        self.pred_y = self.model.predict(self.test_x)
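# Example (sketch): assuming the Test base class provides train_x/train_y and
# test_x, t = TGaussianNB(...); t.train(); t.predict() fills t.pred_y.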
| UTF-8 | Python | false | false | 400 | py | 57 | TGaussianNB.py | 11 | 0.58 | 0.565 | 0 | 23 | 16.217391 | 55 |
ijufumi/demo-python | 12,738,873,001,105 | 45dbcc77100a8c7d8efa9daeb77b8ee48c7eb978 | 4bdd7b5eaf1f9119c50fd41265195b6fd0abc74e | /result_of_deep-learning-from-scratch/chap2/2_3_2.py | 3a140c6dd9e9dda7c9579cb9c595b020765a3b79 | [
"MIT"
]
| permissive | https://github.com/ijufumi/demo-python | 6a0ebc8eb20b2836af9df2a972bb2f277614736f | 10e621e76ddd63f88b759d630975900c9ec792f6 | refs/heads/master | 2023-08-22T05:28:13.613312 | 2023-08-11T00:39:27 | 2023-08-11T00:39:27 | 207,212,735 | 0 | 0 | MIT | false | 2022-12-10T00:18:48 | 2019-09-09T03:03:37 | 2022-01-14T03:56:44 | 2022-12-10T00:18:13 | 40,227 | 0 | 0 | 0 | Python | false | false | import numpy as np
x = np.array([0, 1])
w = np.array([0.5, 0.5])
b = -0.7
ans = np.sum(x * w) + b
print(ans)
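# Weighted sum of an AND-gate perceptron for input (x1, x2) = (0, 1):
# prints roughly -0.2, below the firing threshold of 0, so the gate outputs 0.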
| UTF-8 | Python | false | false | 110 | py | 24 | 2_3_2.py | 14 | 0.536364 | 0.463636 | 0 | 7 | 14.714286 | 24 |
grey-felipe/Movie-app-prototype | 6,640,019,455,450 | 474580e0fafb9ea78bc81c890b15924340df4416 | f630d6ccef4dce767f83579cb746eb821cc96e1d | /app/users/views.py | 554a14df9b0da72006fc37e21e8fbf01d92e17db | []
| no_license | https://github.com/grey-felipe/Movie-app-prototype | 3ed995a6f248ebf5dbc71336181d1330824c60cc | 0b0ac3786d4f27ec9a64f23ad6d32dcbcd8733ee | refs/heads/develop | 2020-04-21T23:36:52.490340 | 2019-02-11T07:03:18 | 2019-02-11T07:03:54 | 169,951,761 | 0 | 0 | null | false | 2020-10-27T21:35:54 | 2019-02-10T06:59:15 | 2019-02-11T07:04:09 | 2020-10-27T21:35:53 | 19 | 0 | 0 | 2 | Python | false | false | from flask_restful import Resource, Api
from flask import jsonify, make_response, request, render_template
from werkzeug.security import generate_password_hash, check_password_hash
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required,
fresh_jwt_required, jwt_refresh_token_required, get_raw_jwt, get_jwt_identity)
from app.utilities.type_validator import (
    isLegit, isPhoneNumber, catch_empty_strings, validate_strings)
from sqlalchemy import exc
from app import db
from app import jwt
from app import mail
from .models import Users, Tokens
from . import users_vone
from .. import app
import os
from flask_mail import Message
import datetime
import jsonschema
api = Api(users_vone)
def hash_maker(password):
hashed_password = generate_password_hash(password, method='sha256')
return hashed_password
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
token = Tokens.query.filter_by(token=decrypted_token['jti']).first()
if token:
return True
else:
return False
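# A token counts as blacklisted once Logout (below) has stored its jti in the
# Tokens table; flask-jwt-extended invokes this loader on every protected call.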
@app.errorhandler(jsonschema.ValidationError)
def onValidationError(e):
return make_response(jsonify({'message': 'Please provide correct information.'}), 400)
class SignUp(Resource):
@app.validate('register', 'user_reg_obj')
def post(self):
data = request.get_json()
string_list = [data['username'], data['email'], data['password'],
data['phone']]
# if not validate_strings(string_list) or catch_empty_strings(string_list):
# return make_response(jsonify({'message': 'Please provide correct information.'}), 400)
# if not isLegit(data['email']):
# return make_response(jsonify({'message': 'Please provide a valid email.'}), 400)
try:
hashed_password = hash_maker(data['password'])
user = Users(username=data['username'],
email=data['email'], bio=data['bio'], phone=data['phone'], password=hashed_password, isAdmin=data['isAdmin'], image=data['image'])
            db.session.add(user)
            db.session.commit()
return make_response(jsonify({'message': 'Sign up was successful.'}), 200)
except exc.IntegrityError:
db.session.rollback()
return make_response(jsonify({'message': 'User already exists.'}), 400)
class Login(Resource):
def post(self):
data = request.get_json()
string_list = [data['email'], data['password']]
if not validate_strings(string_list) or catch_empty_strings(string_list):
return make_response(jsonify({'message': 'Please provide correct information.'}), 400)
if not isLegit(data['email']):
return make_response(jsonify({'message': 'Please provide a valid email.'}), 400)
try:
user = Users.query.filter_by(email=data['email']).first()
if user is None:
return make_response(jsonify({'message': 'Login failed! unknown user.'}), 400)
elif not check_password_hash(user.password, data['password']):
return make_response(jsonify({'message': 'Login failed! wrong password.'}), 400)
else:
user_dict = {'email': user.email, 'username': user.username,
'isAdmin': user.isAdmin}
access_token = create_access_token(identity=user_dict)
refresh_token = create_refresh_token(identity=user_dict)
tokens = {'access_token': access_token,
'refresh_token': refresh_token}
return make_response(jsonify({'message': tokens}), 200)
        except exc.SQLAlchemyError:
db.session.rollback()
return make_response(jsonify({'message': 'Login failed! unknown user.'}), 400)
class GetUser(Resource):
@jwt_required
def get(self):
current_user = get_jwt_identity()
token_email = current_user['email']
try:
result = Users.query.filter_by(email=token_email).first()
user_dict = {'username': result.username, 'email': result.email,
'bio': result.bio, 'image': result.image, 'phone': result.phone}
return make_response(jsonify({'message': user_dict}), 200)
except exc.DatabaseError:
db.session.rollback()
return make_response(jsonify({'message': 'Operation failed, user does not exist.'}), 400)
class GetUsers(Resource):
@jwt_required
def get(self):
current_user = get_jwt_identity()
isAdmin = current_user['isAdmin']
if not isAdmin:
return make_response(jsonify({'message': 'User is not authorized.'}), 401)
try:
result = Users.query.all()
if result:
user_list = []
for user in result:
user_dict = {'username': user.username, 'email': user.email,
'bio': user.bio, 'image': user.image, 'phone': user.phone}
user_list.append(user_dict)
return make_response(jsonify({'message': user_list}), 200)
else:
return make_response(jsonify({'message': 'No users found.'}), 400)
except exc.DatabaseError:
db.session.rollback()
return make_response(jsonify({'message': 'Operation failed.'}), 400)
class EditUsername(Resource):
@jwt_required
def put(self):
data = request.get_json()
current_user = get_jwt_identity()
user_email = current_user['email']
string_list = [data['username']]
if not validate_strings(string_list) or catch_empty_strings(string_list):
return make_response(jsonify({'message': 'Please provide correct information.'}), 400)
try:
result = Users.query.filter_by(email=user_email).first()
result.username = data['username']
db.session.commit()
return make_response(jsonify({'message': 'Username successfully updated.'}), 200)
        except exc.SQLAlchemyError:
db.session.rollback()
return make_response(jsonify({'message': 'Operation failed.'}), 400)
class EditBio(Resource):
@jwt_required
def put(self):
data = request.get_json()
current_user = get_jwt_identity()
user_email = current_user['email']
string_list = [data['bio']]
if not validate_strings(string_list) or catch_empty_strings(string_list):
return make_response(jsonify({'message': 'Please provide correct information.'}), 400)
try:
result = Users.query.filter_by(email=user_email).first()
result.bio = data['bio']
db.session.commit()
return make_response(jsonify({'message': 'Bio successfully updated.'}), 200)
        except exc.SQLAlchemyError:
db.session.rollback()
return make_response(jsonify({'message': 'Operation failed.'}), 400)
class EditImage(Resource):
@jwt_required
def put(self):
data = request.get_json()
current_user = get_jwt_identity()
user_email = current_user['email']
string_list = [data['image']]
if not validate_strings(string_list) or catch_empty_strings(string_list):
return make_response(jsonify({'message': 'Please provide correct information.'}), 400)
try:
result = Users.query.filter_by(email=user_email).first()
result.image = data['image']
db.session.commit()
return make_response(jsonify({'message': 'Image successfully updated.'}), 200)
        except exc.SQLAlchemyError:
db.session.rollback()
return make_response(jsonify({'message': 'Operation failed.'}), 400)
class ForgotPassword(Resource):
def post(self):
data = request.get_json()
try:
user = Users.query.filter_by(email=data['email']).first()
if user:
user_dict = {'email': user.email, 'username': user.username,
'isAdmin': user.isAdmin}
validity_time = datetime.timedelta(minutes=5)
access_token = create_access_token(
identity=user_dict, expires_delta=validity_time)
reset_link = 'http://127.0.0.1:5000/v1/auth/password/reset/' + access_token
msg = Message(
'Reset Your Password!',
sender='seryaziphillip@gmail.com',
recipients=[user.email])
msg.html = render_template(
'reset_template.html', link=reset_link)
mail.send(msg)
return make_response(jsonify({'message': 'An email has been sent to your address.'}), 200)
else:
return make_response(jsonify({'message': 'Request was not processed, unknown user.'}), 400)
except exc.DatabaseError:
return make_response(jsonify({'message': 'Request was not processed.'}), 400)
class RenderResetPasswordForm(Resource):
def get(self, token):
pass
class ResetPassword(Resource):
@jwt_required
def put(self):
return make_response(jsonify({'message': 'Your password has been edited.'}), 200)
class Logout(Resource):
@jwt_required
def post(self):
jti = get_raw_jwt()['jti']
token = Tokens(token=jti)
try:
db.session.add(token)
db.session.commit()
return make_response(jsonify({'message': 'User was logged out.'}), 200)
except exc.DatabaseError:
db.session.rollback()
return make_response(jsonify({'message': 'Attempt to use invalid token.'}), 400)
class TokenRefresher(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
refresh_token = create_access_token(identity=current_user, fresh=False)
return make_response(jsonify({'message': {'refresh_token': refresh_token}}), 200)
api.add_resource(SignUp, '/signup')
api.add_resource(Login, '/login')
api.add_resource(Logout, '/logout')
api.add_resource(TokenRefresher, '/token/refresh')
api.add_resource(GetUser, '/user')
api.add_resource(GetUsers, '/users')
api.add_resource(EditBio, '/edit/bio')
api.add_resource(EditImage, '/edit/image')
api.add_resource(EditUsername, '/edit/username')
api.add_resource(ForgotPassword, '/password/forgot')
api.add_resource(RenderResetPasswordForm, '/password/reset/<string:token>')
api.add_resource(ResetPassword, '/password/reset')
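# Example requests against these routes (sketch; the blueprint appears to be
# mounted under /v1/auth, as the password-reset link above suggests):
#   POST /v1/auth/signup  {"username": ..., "email": ..., "password": ..., ...}
#   POST /v1/auth/login   {"email": ..., "password": ...}  -> access/refresh tokens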
| UTF-8 | Python | false | false | 10,643 | py | 16 | views.py | 11 | 0.607066 | 0.596354 | 0 | 271 | 38.273063 | 155 |
Zaiyong/csrosetta | 11,579,231,877,901 | c07a75a4a96481b097bb72a6f7e78c416d74ff8b | d90af0def0e29ebaebcf986399fcee65e1e2916c | /python/bmrb.py | a835c9248809b4e85eced805668e3256f8cbbaf3 | []
| no_license | https://github.com/Zaiyong/csrosetta | 2fdbbdd7da24ce971f7f2297a7cd14723cdd59d6 | 539c60664dba3972062002ff4e636c7f029927cb | refs/heads/master | 2020-12-25T15:18:39.274689 | 2020-02-25T09:15:35 | 2020-02-25T09:15:35 | 65,408,072 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ## -*- mode:python;tab-width:2;indent-tabs-mode:t;rm-trailing-spaces:t;python-indent:2 -*-
import subprocess
from os import dup2,path
from os.path import exists
from operator import add
from math import sqrt
from os.path import *
import argparse
import sys
import copy
import shutil
import string
### toolbox library
import library
from os import mkdir , makedirs
from warnings import *
import traceback
#from Bio.PDB.PDBParser import *
#from Bio.PDB import PDBIO
#from numpy import square
from library import mkdirp
import library
class BMRB_Type :
def __init__( self, tags ):
self.tags=tags
self.type=int
class BMRB_Int( BMRB_Type ):
def __init__( self, tags ):
BMRB_Type.__init__( self, tags )
self.type=int
class BMRB_Str( BMRB_Type ):
def __init__( self, tags ):
BMRB_Type.__init__( self, tags )
self.type=str
class BMRB_Float( BMRB_Type ):
def __init__( self, tags ):
BMRB_Type.__init__( self, tags )
self.type=float
_tag_dictionary = {'ID': BMRB_Int(['_Atom_chem_shift.ID','_Atom_shift_assign_ID']),
'RESID': BMRB_Int(['_Atom_chem_shift.Comp_index_ID','_Residue_seq_code','_Atom_one_residue_seq_code','Seq_ID_1','Seq_ID','Comp_index_ID']),
'RESNAME': BMRB_Str(['_Atom_chem_shift.Comp_ID','_Residue_label','_Atom_one_residue_label','Comp_ID_1','Comp_ID']),
'ATOMNAME': BMRB_Str(['_Atom_name','_Atom_chem_shift.Atom_ID','_Atom_one_atom_name','Atom_ID_1','Atom_ID']),
'SHIFT': BMRB_Float(['_Chem_shift_value','_Atom_chem_shift.Val','_Chem_shift_value','Val','Chem_shift_val']),
'SIGMA': BMRB_Float(['_Chem_shift_value_error','_Atom_chem_shift.Val_err','Val_err','_Residual_dipolar_coupling_value_error','Chem_shift_val_err']),
'RDC_TYPE': BMRB_Str(['_Residual_dipolar_coupling_ID','RDC_code']),
'RESID2': BMRB_Int(['_Atom_two_residue_seq_code','Seq_ID_2']),
'RESNAME2': BMRB_Str(['_Atom_two_residue_label','Comp_ID_2']),
'ATOMNAME2': BMRB_Str(['_Atom_two_atom_name','Atom_ID_2']),
'AMBIGUITY': BMRB_Int(['_Atom_chem_shift.Ambiguity_code','Ambiguity_code','_Chem_shift_ambiguity_code','_Chem_shift_ambiguity_type']),
'RDC': BMRB_Float(['_Residual_dipolar_coupling_value','Val'])
}
cs_loop_cols=['ID','RESID','RESNAME','ATOMNAME','SHIFT','SIGMA','AMBIGUITY']
rdc_loop_cols=['RDC_TYPE', 'RESID','RESNAME','ATOMNAME','RESID2','RESNAME2','ATOMNAME2','RDC','SIGMA']
#after reading the BmrbFile content is organized in frames of name <frame> which are organized in self._frames
#according to their category <frame-category>. A frame goes from 'save_<frame>' to 'save_'
#according to this entry:
#save_<frame>
# _Saveframe_category <frame-category>
# .... content
#save_
#From each frame we currently store only the 'loop', that is lines between 'loop_' and 'stop_'. The beginning of a
#loop has entries starting with '_XXX' which give the column names.
#we store each loop of a frame as object of class 'Loop' which has the data-members cols (the column headers) and data
#which is just a list with an entry per line. Each line-entry is a list of tags for this line.
#to extract data from a frame we use 'process_frame( <frame_category>, columns )'
#the columns are translated using the _tag_dictionary which takes care of ambiguous nameing found in different BMRB styles.
#the output is a dictionary {'colname1': [data1, data2, data3, ... , dataN ], 'colname2':[data1, data2, ..., dataN] }
#if multiple frames of the same category have fitting columns these will be appended to the dictionary...
from basic.Tracer import Tracer
tr = Tracer( "bmrb" )
#reads a BmrbFile into _frames
class BmrbFile:
###################################################################
# class Loop
class Loop:
def __init__(self, cols, data):
if len(data):
self.n_cols_in_data = len(data[0])
self.n_cols = len(cols)
self.n_repeats_per_line = self.n_cols_in_data/self.n_cols
if self.n_cols_in_data % self.n_cols:
msg='Loop-Header is inconsistent with data-entries, check number of columns\n'
msg+='-'*80+'\n'
msg+='Header: \n '
msg+='\n '.join(cols)
msg+='\n\nFirst-data entry: '+' | '.join(data[0])+'\n'
msg+='-'*80
raise library.InconsistentInput(msg)
else:
msg='Loop has no data-entries\n'
msg+='-'*80+'\n'
msg+='Header: \n'
msg+='\n '.join(cols)
msg+='-'*80
raise library.InconsistentInput(msg)
self.cols=cols
self.data=data
def size( self ):
return len(self.cols)
def __str__( self ):
return "[ "+", ".join(self.cols)+" ]"
def __repr__( self ):
return self.__str__()
def process( self, cols ):
trloop = Tracer( 'loop', tr )
ind={}
types={}
for col in cols:
for tag in _tag_dictionary[col].tags:
if tag in self.cols:
ind[col]=self.cols.index(tag);
types[col]=_tag_dictionary[col].type
break
#should have now the indices of the requested columns and their type in ind and types
#if no fitting columns return
if len(ind)==0: return
trloop.Debug('labels (bmrb): ', self.cols)
trloop.Debug('column indices: ', ind)
#extract output dictionary
output={}
for col in cols:
output[col]=[]
#lines are already split into columns
#figure out which bmrb-type columns fit to the requested columns (in cols) in this loop
for line in self.data:
if line[0][0]=='#': continue
for i,col in enumerate(cols):
# print 'F', i, col, line, self.n_repeats_per_line
if not col in ind.keys() or line[ind[col]]=='.':
trloop.Debug('found empty columns: ',cols[i], ' and will remove it')
del cols[i]
return self.process( cols )
for repeat in range(0,self.n_repeats_per_line):
# print output[col]
# print types[col]
# print ind[col]
# print 'colsindata',self.n_cols_in_data
# print ind[col]+self.n_cols_in_data*repeat, repeat
output[col].append(types[col](line[ind[col]+self.n_cols_in_data/self.n_repeats_per_line*repeat]))
return output
####################################################################
# class Frame:
# a frame starts with save_XXXX and ends with save_
# frames contain 'fields' and 'loops'. A loop is basically a table, first define what the columns are, then the data
class Frame:
def __init__( self, name, fields, loops ):
self.name=name
self.fields=fields
self.loops=loops
def __repr__( self ):
str="Frame %s: \n"%self.name
for k,f in self.fields.iteritems():
str=str+"%s: %s\n"%(k,f)
str=str+"and %d loops\n"%len(self.loops)
return str
###########################################################
#BmrbFile
def __init__(self, file, errors=None ):
self._frames={}
self.parse_file( open(file,'r'), errors )
self.star3=False
def get_frame_category(self,line):
tags=line.split()
if len(tags)<1: return None
try:
s3tag=tags[0].split('.')[1]
if s3tag!='Sf_category': return None
self.star3=True
except IndexError:
if tags[0]!='_Saveframe_category': return None
self.star3=False
return tags[1]
#-------------------------------------------------------
# main capture routine= read a frame from save_XXXX to save_
# find next save_<frame>
# return the name, i.e., <frame> and the category <frame-category>
def Next_Save_Frame( self, file ):
name=''
for line in file:
tags=string.split(line)
tr.Debug('SKIP BETWEEN FRAMES: ',line[:-1])
if len(tags)>0 and len(tags[0])>=5 and tags[0][:5]=='save_':
tr.Debug('READ FRAME: ',line[:-1])
name=tags[0][5:]
for line in file:
category=self.get_frame_category( line)
if category: break
return category, name
return 'NO_CATEGORY', 'NO_NAME'
#-----------------------------------------------------
# read fields and loops of current frame
# return as Loops and Fields (classes see above )
def capture_loops( self, file, errors ):
loops=[]
fields={}
multi_line_field=None
col_nr=-1;
for line in file:
tr.Debug('READ FRAME :',line[:-1])
if "'" in line or '"' in line:
tags=[]
within_field=False
current_field=""
for c in line:
if c == "'" or c=='"':
if within_field and len(current_field):
tags.append(current_field)
current_field=''
within_field=False
else:
within_field=True
continue
if within_field:
current_field+=c
elif c==' ' and len(current_field):
tags.append(current_field)
current_field=''
elif c in string.whitespace:
continue
else:
current_field+=c
# print 'T', tags, line
else:
tags=string.split(line)
if len(tags) and tags[0][0]=='#':
continue
for i,t in enumerate(tags):
if t[0]=='#':
tags=tags[0:i]
break
if len(tags)>0 and tags[0]=='loop_':
#this is the sart of a loop
col_nr=0;
col_id=[];
data=[];
continue
if col_nr<0 and len(tags)>0 and tags[0][0]=='_':
#this is a field for the frame
if self.star3:
fkey=tags[0].split('.')[1]
else:
fkey=tags[0]
if len(tags)>1:
fval=tags[1]
fields[fkey]=fval
else: multi_line_field='START'
continue
if col_nr>=0 and len(tags)>0 and tags[0][0]=='_':
#this is a field for the current loop
if self.star3:
name=tags[0].split('.')[1]
else:
name=tags[0]
col_id.append(name);
col_nr+=1;
continue
if col_nr>=0 and len(tags)>0 and tags[0]=='stop_':
#end of a loop
try:
loops.append( self.Loop( col_id, data ))
except library.InconsistentInput as exc:
exc.add("This loop will be ignored. Hopefully nothing important !!!")
if not errors is None: errors.append(exc)
else: raise exc
col_nr=-1
continue
if col_nr>=0 and len(tags)>0:
#data entry of current loop
data.append(tags);
continue
if len(tags)>0 and tags[0]=='save_':
return loops, fields
if len(tags)>0 and tags[0][0]==';' and multi_line_field:
if multi_line_field=='START':
multi_line_field='CURRENT'
mlf_data=[]
elif multi_line_field=='CURRENT':
multi_line_field=None
fields[fkey]=mlf_data
continue
if len(tags)>0 and multi_line_field=='CURRENT':
mlf_data.append(line[:-1])
if col_nr>=0: #col_nr >= 0 means we are still in a loop --- EOF without getting 'stop_'
try:
loops.append( self.Loop( col_id, data ) )
except library.InconsistentInput as exc:
exc.add("This loop will be ignored. Hopefully nothing important !!!")
if not errors is None: errors.append(exc)
else: raise exc
return loops, fields
#go through all frames and store the respective loops
def parse_file( self, file, errors=None ):
MAX_FRAMES=1000 #to avoid hanging in corrupted files
while MAX_FRAMES>0:
MAX_FRAMES-=1
SAVE_FRAME_CATEGORY, name=self.Next_Save_Frame( file );
tr.Info( 'reading frame ( %s ): %s ...'%(SAVE_FRAME_CATEGORY, name))
if SAVE_FRAME_CATEGORY=='NO_CATEGORY':
break
loops, fields = self.capture_loops( file, errors )
self.add_frame(name, loops, fields, SAVE_FRAME_CATEGORY )
def add_frame(self, name, loops, fields, CATEGORY='GENERIC'):
# print 'ADD: ', name, loops, fields, CATEGORY
if not CATEGORY in self._frames:
self._frames[CATEGORY]={}
self._frames[CATEGORY][name]=self.Frame(name, fields, loops )
#how many different frame categories have been found ?
def nframes( self ):
return len( self._frames )
#extract columns according to _tag_dictionary from loop
def get_frame( self, categories ):
for category in categories:
try:
frames=self._frames[category]
return category, frames
except KeyError:
pass
raise KeyError
#process frames of a certain category and return output that fits the requested columns
def process_frame( self, categories, cols ):
outputs=[]
frames=None
for category in categories:
try:
frames=self._frames[category]
break
except KeyError:
pass
if not frames:
raise library.MissingInput("Cannot find category %s. Maybe not a proper BMRB file?"%category)
for frame in frames.itervalues():
for loop in frame.loops:
#print 'L: ', loop
tr.Debug("process loop with %3d columns in frame %s"%(loop.size(),frame.name))
tr.Debug("loop: %s"%loop)
output=loop.process( copy.copy(cols) )
if output:
outputs.append((len(output),frame.name,output))
outputs=sorted(outputs,key=lambda x: -len(x[2].keys()))
return outputs
_SEQUENCE_FIELD_KEYS=['_Mol_residue_sequence','Polymer_seq_one_letter_code']
def get_sequence( bmrb_file, verbose=0 ):
categories=['monomeric_polymer','entity']
# try:
category,seq_frames=bmrb_file.get_frame(categories)
# except KeyError:
# raise library.InconsistentInput('Cannot find frame describing the molecule: %s'%' '.join(categories))
sequences={}
for frame in seq_frames.itervalues():
for key in _SEQUENCE_FIELD_KEYS:
try:
sequences[frame.name]=''.join([ x.strip() for x in frame.fields[key]])
except KeyError:
pass
if len(sequences.keys())<1:
raise library.InconsistentInput("Cannot find any of sequence fields '%s'. Maybe not a proper BMRB file?"%"','".join(_SEQUENCE_FIELD_KEYS))
return sequences[sequences.keys()[0]], sequences, len(sequences)
def read_fullbmrb_or_trunkfile( filename, types, verbose=0, errors=None ):
fasta=None
if verbose==0:
tr.set_priority( 1 )
bmrb_file = BmrbFile( filename, errors )
try:
seq_frame=bmrb_file.get_frame(['monomeric_polymer','entity'])
except KeyError:
seq_frame=None
cs_frame=None
for type in types:
try:
cs_frame=bmrb_file._frames[type]
break
except KeyError:
pass
if not cs_frame:
type=types[0]
#reading under assumption of full STAR structure didn't work, reread from start this time assume that loops are at top-level
loops, fields = bmrb_file.capture_loops( open(filename, 'r' ), errors )
tr.Debug('L', loops)
tr.Debug('F', fields)
bmrb_file.add_frame('generic',loops,fields,type);
seq_key=None
seq_key=set(fields).intersection( set( _SEQUENCE_FIELD_KEYS ))
if not seq_frame and len(seq_key)>0:
fasta=''.join([ x.strip() for x in fields[seq_key.pop()]])
nmol=1
if verbose and seq_frame:
tr.Info("*"*30)
tr.Info("Molecule(s) found in BMRB: %s"%(', '.join(seq_frame[1].keys())))
tr.Info("*"*30)
if not fasta:
try:
(fasta, fastas, nmol)=get_sequence( bmrb_file, verbose )
except KeyError:
fasta=None
nmol=1
if nmol>1:
print "WARNING: found multiple molecules in BMRB"
print fasta
exit()
return bmrb_file, fasta
| UTF-8 | Python | false | false | 14,506 | py | 319 | bmrb.py | 273 | 0.638081 | 0.62967 | 0 | 456 | 30.811404 | 157 |
while777/while777 | 18,811,956,771,043 | ee4db386b9156d28956069020f2f4e7ffc1e6fc9 | 653bad0fe905e6ce885d458ce8e1275603ec8bc9 | /SolutionSearching/Bisect.py | de1c9b795861532e9d7d73b555d9915c40e46063 | []
| no_license | https://github.com/while777/while777 | 79ab386fb7c85979d0c278c29701bec6f9d611c9 | 1c3bff838e18c2a480c78ec287decf5dfd223733 | refs/heads/master | 2020-12-08T05:23:46.432057 | 2019-08-19T04:28:04 | 2019-08-19T04:28:04 | 66,278,815 | 0 | 0 | null | false | 2017-08-23T19:48:51 | 2016-08-22T14:17:20 | 2017-08-23T19:38:07 | 2017-08-23T19:48:43 | 0 | 0 | 0 | 2 | Python | null | null | #using matlab to test the results for function bsformula
from numpy import exp, sqrt, max, mean, std, log, cumsum, min
from numpy.random import randn, rand
import numpy as np
import BS
from BS import bsformula
def bisect(target, targetfunction, start=None, bounds=None, tols = 0.001, maxiter=1000):
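    """Bisection search for an x with targetfunction(x) == target.

    If only `start` is given, a bracketing interval is grown around it;
    if `bounds` is given, that interval is used directly. Returns the
    list of successive bisection midpoints.
    """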
BS.c = 0
cps = 0.01
eps = 6
if bounds is None and start is None:
        raise ValueError("either start or bounds must be provided")
elif not start is None and bounds is None:
i = 1
while i < maxiter:
bound=[]
bound.append(start - i * eps)
bound.append(start + i * eps)
if (targetfunction(bound[0]) - target) * (targetfunction(bound[1]) - target) <= 0:
n = 1
series=[]
medium = (bound[1] + bound[0]) * 0.5
while n < maxiter:
medium = (bound[1] + bound[0]) * 0.5
series.append(medium)
if targetfunction(medium) == target or abs(bound[1] - bound[0]) * 0.5 < tols:
return series
else:
n = n + 1
if targetfunction(bound[1]) > target:
if (targetfunction(medium)) < target:
bound[0] = medium
else:
bound[1] = medium
elif targetfunction(bound[1]) < target:
if (targetfunction(medium)) < target:
bound[1] = medium
else:
bound[0] = medium
return series
elif (targetfunction(bound[0]) - target) * (targetfunction(bound[1]) - target) > 0:
grid = np.arange(bound[0], bound[1], cps)
num = (bound[1] - bound[0]) / cps
Max = max(targetfunction(grid))
Min = min(targetfunction(grid))
if (Max-target) * (Min-target) < 0:
count = 1
while count < num:
                        if (targetfunction(bound[0] + count * cps) - target) * (targetfunction(bound[0]) - target) < 0:
bound[0] = bound[0] + count * cps
return bound[0]
else:
count += 1
n = 1
series=[]
medium = (bound[1] + bound[0]) * 0.5
while n < maxiter:
medium = (bound[1] + bound[0]) * 0.5
series.append(medium)
if targetfunction(medium) == target or abs(bound[1] - bound[0]) * 0.5 < tols:
return series
else:
n = n + 1
if targetfunction(bound[1]) > target:
if (targetfunction(medium)) < target:
bound[0] = medium
else:
bound[1] = medium
elif targetfunction(bound[1]) < target:
if (targetfunction(medium)) < target:
bound[1] = medium
else:
bound[0] = medium
else:
i = i + 1
            if i == maxiter:
                raise StopIteration("cannot find solutions")
elif start is None and not bounds is None:
if (targetfunction(bounds[0]) - target) * (targetfunction(bounds[1]) - target) <= 0:
n = 1
series = []
medium = (bounds[1] + bounds[0]) * 0.5
while n < maxiter:
medium = (bounds[1] + bounds[0]) * 0.5
series.append(medium)
if targetfunction(medium) == target or abs(bounds[1] - bounds[0]) * 0.5 < tols:
return series
else:
n = n + 1
if targetfunction(bounds[1]) > target:
if (targetfunction(medium)) < target:
bounds[0] = medium
else:
bounds[1] = medium
elif targetfunction(bounds[1]) < target:
if (targetfunction(medium)) < target:
bounds[1] = medium
else:
bounds[0] = medium
return series
elif (targetfunction(bounds[0]) - target) * (targetfunction(bounds[1]) - target) > 0:
grid = np.arange(bounds[0], bounds[1], cps)
num = (bounds[1] - bounds[0]) / cps
Max = max(targetfunction(grid))
Min = min(targetfunction(grid))
            if (Max - target) * (Min - target) < 0:
count = 1
while count < num:
                    if (targetfunction(bounds[0] + count * cps) - target) * (targetfunction(bounds[0]) - target) < 0:
bounds[0] = bounds[0] + count * cps
return bounds[0]
else:
count += 1
n = 1
series=[]
medium = (bounds[1] + bounds[0]) * 0.5
while n < maxiter:
medium = (bounds[1] + bounds[0]) * 0.5
series.append(medium)
if targetfunction(medium) == target or abs(bounds[1] - bounds[0]) * 0.5 < tols:
return series
else:
n = n + 1
if targetfunction(bounds[1]) > target:
if (targetfunction(medium)) < target:
bounds[0] = medium
else:
bounds[1] = medium
elif targetfunction(bounds[1]) < target:
if (targetfunction(medium)) < target:
bounds[1] = medium
else:
bounds[0] = medium
return series
else:
print("no solution for this bounds")
if __name__=="__main__":
f= lambda x: x ** 2 + x * 4 - 5
print(bisect(target = 0, targetfunction= f, start = -6, bounds = None, tols = 0.001, maxiter = 1000))
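# The search above brackets the root x = -5 of x**2 + 4*x - 5 (the interval
# grown around start=-6 is [-12, 0]); the printed list holds the bisection
# midpoints converging to -5.0. The other root of f is x = 1.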
| UTF-8 | Python | false | false | 6,707 | py | 94 | Bisect.py | 45 | 0.39347 | 0.371552 | 0 | 144 | 44.423611 | 119 |
liaochangjiang/wbot-cs | 4,449,586,169,424 | 5d0ec78d8dd9fd6d7dd30f75ac6af22ea7046fa7 | ac9ce0412a1390988850afc0ee968f608bdb7c42 | /src/apps/qrlogin/urls.py | 4bc93105b4eb364727c4050bc64de264a412374e | []
| no_license | https://github.com/liaochangjiang/wbot-cs | 91f04b17c748aa53b5b6f8b3689402d39fe0f0a9 | a767cb4dd11e76fa3f8aef6856d3685a048e657f | refs/heads/master | 2018-11-22T02:49:04.677764 | 2018-10-24T14:27:50 | 2018-10-24T14:27:50 | 133,817,479 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
app_name = 'qrlogin'
urlpatterns = [
    path('gene', views.gene, name='gene'),
    path('check', views.check, name='check')
] | UTF-8 | Python | false | false | 172 | py | 107 | urls.py | 85 | 0.680233 | 0.680233 | 0 | 8 | 20.625 | 42 |
bluebrid/base-knowledge | 17,025,250,404,418 | dc85489e61b93fd979d1849be629c041409bdd4e | 10c605f7b3463769a5f326d20f15df71aaf760cc | /python/testingTodoApp/features/lib/pages/login_page.py | 53ee12593de5a7f0cff7739d97f1e8592c017dad | []
| no_license | https://github.com/bluebrid/base-knowledge | b10a50b0c1372e761ed6a73926260869edf42fa3 | 65c763874fe1b0c6af377e85a9fb4b776caf2b13 | refs/heads/master | 2023-03-11T19:54:45.391536 | 2023-02-23T10:04:00 | 2023-02-23T10:04:00 | 160,443,639 | 4 | 0 | null | false | 2023-03-03T18:06:33 | 2018-12-05T01:45:37 | 2023-01-31T17:08:09 | 2023-03-03T18:06:33 | 108,385 | 2 | 0 | 141 | JavaScript | false | false | from .base_page_object import BasePage
from selenium.webdriver.common.by import By
#import time
class LoginPage(BasePage):
def __init__(self, context):
BasePage.__init__(self, context)
sheetName = "usersiInfo"
locator_dictionary = {
"loginTypeBtn": (By.ID, "com.wunderkinder.wunderlistandroid:id/LoginButton"),
"emailInput": (By.ID, "com.wunderkinder.wunderlistandroid:id/login_email_edittext"),
"passwordInput": (By.ID, "com.wunderkinder.wunderlistandroid:id/login_password_edittext"),
"loginBtn": (By.ID, "com.wunderkinder.wunderlistandroid:id/login_button")
}
def reset(self):
        # send_keys appends to whatever is already in the field, so clear both fields first
self.emailInput.clear()
self.passwordInput.clear()
def login(self, username='', password=''):
self.reset()
self.emailInput.send_keys(username)
self.passwordInput.send_keys(password)
#time.sleep(10)
self.loginBtn.click() | UTF-8 | Python | false | false | 987 | py | 1,914 | login_page.py | 1,232 | 0.660807 | 0.658738 | 0 | 25 | 37.72 | 98 |
miguelvitores/tfg-code | 1,013,612,328,525 | 8e74a478c0446c65166621268f0ccc8ae228a5b6 | 666a61619aca28e16076751bd2a02f88ee8fbedc | /test/test_testdata.py | 4d6414fe93d50eb8b6e372a1b33f9c0de12fbe39 | []
| no_license | https://github.com/miguelvitores/tfg-code | bf835bb22a6f5458cce441ede88fdad23511fb7b | 9d19b4c983f9f5cda5c320a669004c125a78852a | refs/heads/master | 2023-01-02T11:30:11.345798 | 2020-10-21T11:53:28 | 2020-10-21T11:53:28 | 267,086,205 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from bin.algoritmos.busq.busquedainterpolacion import BusquedaInterpolacion
from bin.algoritmos.ord.ordenacionquicksort import OrdenacionQuicksort
from bin.testdata.rango import RangoVal, RangoTam
from bin.testdata.td_tipos import TestDataOrdenacionLOEquidistante
from bin.testdata.td_tipos import TestDataBusquedaLOEquidistante
class EditarTestdata(unittest.TestCase):
oq = OrdenacionQuicksort()
bi = BusquedaInterpolacion()
rt = RangoTam(40, 10, 2.5)
rv = RangoVal(180, 20, 5)
def test_editar_algoritmo_busqueda_previo_analisis(self):
td = TestDataBusquedaLOEquidistante(self.bi)
td.analizar()
td.editar_algoritmo(self.oq)
self.assertEqual(len(td.resultados), 0)
self.assertIs(td.algoritmo, self.oq)
def test_editar_algoritmo_ordenacion_previo_analisis(self):
td = TestDataOrdenacionLOEquidistante(self.oq)
td.analizar()
td.editar_algoritmo(self.bi)
self.assertEqual(len(td.resultados), 0)
self.assertIs(td.algoritmo, self.bi)
def test_editar_rangot_busqueda_previo_analisis(self):
td = TestDataBusquedaLOEquidistante(self.bi)
td.analizar()
td.editar_rangot(self.rt)
self.assertEqual(len(td.resultados), 0)
self.assertIs(td.rangot, self.rt)
def test_editar_rangot_ordenacion_previo_analisis(self):
td = TestDataOrdenacionLOEquidistante(self.oq)
td.analizar()
td.editar_rangot(self.rt)
self.assertEqual(len(td.resultados), 0)
self.assertIs(td.rangot, self.rt)
def test_editar_rangov_busqueda_previo_analisis(self):
td = TestDataBusquedaLOEquidistante(self.bi)
td.analizar()
td.editar_rangov(self.rv)
self.assertEqual(len(td.resultados), 0)
self.assertIs(td.rangov, self.rv)
def test_editar_rangov_ordenacion_previo_analisis(self):
td = TestDataOrdenacionLOEquidistante(self.oq)
td.analizar()
td.editar_rangov(self.rv)
self.assertEqual(len(td.resultados), 0)
self.assertIs(td.rangov, self.rv)
class Recalcular(unittest.TestCase):
oq = OrdenacionQuicksort()
bi = BusquedaInterpolacion()
n_veces = 4
def test_recalcular_busqueda(self):
td = TestDataBusquedaLOEquidistante(self.bi)
td.recalcular(self.n_veces)
self.assertGreater(len(td.resultados), 0)
def test_recalcular_ordenacion(self):
td = TestDataOrdenacionLOEquidistante(self.oq)
td.recalcular(self.n_veces)
self.assertGreater(len(td.resultados), 0)
def test_recalcular_busqueda_previo_analisis(self):
td = TestDataBusquedaLOEquidistante(self.bi)
td.analizar()
td.recalcular(self.n_veces)
self.assertGreater(len(td.resultados), 0)
def test_recalcular_ordenacion_previo_analisis(self):
td = TestDataOrdenacionLOEquidistante(self.oq)
td.analizar()
td.recalcular(self.n_veces)
self.assertGreater(len(td.resultados), 0)
| UTF-8 | Python | false | false | 3,022 | py | 41 | test_testdata.py | 38 | 0.692919 | 0.685308 | 0 | 85 | 34.552941 | 75 |
realsaeedhassani/mpkg-school-info | 7,490,422,984,361 | b93af8983d4c18ad24e18c6fe87244cb3a51ff08 | 18528998378eb3ecbef266eeac22e4f6dbb33de0 | /makemigrations_my_app.py | d999f8978f3015561545a229f1a4539db6365f06 | [
"MIT"
]
| permissive | https://github.com/realsaeedhassani/mpkg-school-info | 2b8ded6ebbf4c07f92993b55773eb70f3ddbab34 | a7f8bd600c0d24043ea77353b69dfffa8d4087d9 | refs/heads/main | 2023-03-06T00:14:25.101258 | 2021-02-07T12:03:27 | 2021-02-07T12:03:27 | 336,756,335 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # school_info/makemigrations_my_app.py
from django.core.management import call_command
from boot_my_app import boot_my_app
boot_my_app()
# python manage.py makemigrations my_app
call_command("makemigrations", "my_app")
# python manage.py migrate
call_command("migrate")
| UTF-8 | Python | false | false | 274 | py | 16 | makemigrations_my_app.py | 8 | 0.770073 | 0.770073 | 0 | 12 | 21.833333 | 47 |
maddogsuklee/robot-project-hit | 12,120,397,717,786 | fd7bf0528ad1a54789374bb5324a1fa637d76183 | 8dc74b8194286670db191a16e319afb3c1481d5d | /GUI.py | d8f0f4ca274caf39fb2976c0a4280cd92f7fc762 | []
| no_license | https://github.com/maddogsuklee/robot-project-hit | 8b37f4d357744f760eccce2234355b909bc72152 | f3f367807613d95ae915efd4f37ae5ea7a19b004 | refs/heads/main | 2023-05-07T18:13:52.645962 | 2021-05-28T18:07:33 | 2021-05-28T18:07:33 | 371,782,390 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter as tk
from Who_is_it import find_face_def
from new_face import add_face
class GuiClass:
def __init__(self, main):
# build GUI frame
self.main = main
self.frame = tk.Frame(self.main)
self.main.title("Control panel")
self.main.geometry("500x100")
# add buttons - find face and add new face
self.Find_face_btn = tk.Button(self.main, text="Who is it?", command=self.find_face_gui)
self.Find_face_btn.pack(side=tk.TOP)
self.add_new_face_btn = tk.Button(self.main, text=" Add new face ", command=self.add_new_face_def)
self.add_new_face_btn.pack(side=tk.TOP)
        # add a name entry field and an info label for user feedback
self.name_face = tk.Entry(self.main)
self.name_face.pack()
self.label_info = tk.Label(self.main, text="")
self.label_info.pack()
# call find face function from button
def find_face_gui(self):
find_face_def()
# call add face function from button
def add_new_face_def(self):
# make sure input is valid
if self.name_face.get() != "" and self.name_face.get().replace(" ", "").isalpha():
self.label_info['text'] = add_face(self.name_face.get().lower())
else:
self.label_info['text'] = "Name is invalid - name should be only letters"
root = tk.Tk()
GUI = GuiClass(root)
root.mainloop()
| UTF-8 | Python | false | false | 1,394 | py | 4 | GUI.py | 4 | 0.604735 | 0.60043 | 0 | 41 | 33 | 106 |
Ariel96cs/SimpleWebCrawler | 5,849,745,474,022 | e466030804fe8c5693856b1cef106e83165a1d13 | 7d483fb210c9874840ae662d9ec5433c9c383c6f | /sicrawler/spiders/myspider.py | 28da2d12e104f2ea4a234e5485eda8a1dff74665 | []
| no_license | https://github.com/Ariel96cs/SimpleWebCrawler | 16a2d29174f9272230166c095696bbcb7b252d51 | dba03bea9bad8b34e7fb16d9388ae3733aba9cce | refs/heads/master | 2022-11-25T07:02:25.905310 | 2020-07-22T17:13:55 | 2020-07-22T17:13:55 | 281,738,042 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scrapy
from scrapy.http import Response
from ..items import SicrawlerItem
class MySpider(scrapy.Spider):
name = 'myspider'
allowed_domains = ['foundation.wikimedia.org',
'wikimediafoundation.org',
'www.mediawiki.org',
'meta.wikimedia.org',
'creativecommons.org',
'en.wikipedia.org',
'phl.upr.edu',
'www.tibetanyouthcongress.org',
'www.studentsforafreetibet.org',
'jewishamericansongster.com',
'www.jta.org',
'www.klezmershack.com',
'www.acsmedchem.org',
'www.nap.edu',
'www.formula1.com',
'www.fifa.com',
'newrepublic.com',
'politicalticker.blogs.cnn.com',
'www.hollywoodreporter.com',
'www.nydailynews.com',
'mobile.nytimes.com',
'www.espn.com',
'www.newsweek.com',
'money.cnn.com',
'apnews.com',
'www.economist.com',
'www.cnbc.com',
'www.vox.com',
'www.nbcnews.com',
'www.donaldjtrump.com',
'www.newspapers.com',
'donaldjtrump.com',
'whitehouse.gov',
'keras.io'
]
start_urls = ['https://en.wikipedia.org/wiki/Mathematics',
'https://en.wikipedia.org/wiki/Harry_Potter',
'https://en.wikipedia.org/wiki/Donald_Trump',
'https://en.wikipedia.org/wiki/Breast_cancer',
'https://en.wikipedia.org/wiki/Programming_language',
'https://en.wikipedia.org/wiki/Leonardo_da_Vinci',
'https://en.wikipedia.org/wiki/Sport',
'https://en.wikipedia.org/wiki/Convolutional_neural_network'
]
    def parse(self, response: Response):
        item = SicrawlerItem()
        item['url'] = response.url
        item['body'] = response.css('p::text').getall()
        yield item
        # Follow every outgoing link and parse it recursively; scrapy's
        # dupefilter and allowed_domains keep the crawl bounded.
        links = response.css('a::attr(href)').getall()
        for a in links:
            if a == '/':
                continue
            urljoined = response.urljoin(a)
            yield scrapy.Request(urljoined, callback=self.parse)
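# Run from the project root with:  scrapy crawl myspider -o items.json
# (assumes the surrounding scrapy project defines SicrawlerItem in items.py,
# as imported above).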
| UTF-8 | Python | false | false | 2,893 | py | 8 | myspider.py | 4 | 0.453854 | 0.452817 | 0 | 71 | 39.746479 | 78 |
ChrisKai/qiubai | 6,700,149,030,706 | 05ba2acd2f5caebf53f5ac769ebeccaa413f43b9 | 6494c2cb0e3e8d1379ad85b0fa50b01232862a9c | /qiubai/qiubai/agents.py | 333370c854e01e51982b0689fb82ae90df197f8f | []
| no_license | https://github.com/ChrisKai/qiubai | ad3b95fa254dd17eb0c7b7ce93f343fe65c6b2f5 | 2e2ce4ff769b4ad57c526e9046013efd2916ee14 | refs/heads/master | 2021-05-01T01:57:56.352748 | 2017-02-22T14:01:21 | 2017-02-22T14:01:21 | 79,871,838 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding=utf-8
""" User-Agents """
agents = [
"Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36"
]
| UTF-8 | Python | false | false | 194 | py | 8 | agents.py | 7 | 0.680412 | 0.530928 | 0 | 5 | 37.8 | 143 |
imadmallahi/ImageMining | 11,965,778,916,327 | ac6258242380401a25298455cec4d4d308e3a513 | ce6adb91654ba2a9fb5dfe05e1677017b15005b9 | /ClassificationCar/classifier.py | afebe9ace59a61b8a925765e656a77bbf572967c | []
| no_license | https://github.com/imadmallahi/ImageMining | 600c61f141ccd8e9579205de0a83bcac5c222292 | 94f4a8e1efb1a991bd300bc4ce219c60c25d3892 | refs/heads/master | 2023-01-31T23:45:29.926505 | 2020-12-07T14:18:10 | 2020-12-07T14:18:10 | 319,310,230 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 13 19:40:53 2019
@author: pc
"""
import csv
from sklearn import svm
from getFeatures import ColorDescriptor
import cv2
import glob
import imageio
import matplotlib.pyplot as plt
X=[]
Y=[]
with open("DB2C/obj.csv") as f:
reader = csv.reader(f)
for row in reader:
for vectDescripteur in row[1:]:
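            # eval() turns each stored "[...]" descriptor string back into a
            # Python list; fine for this trusted local CSV, unsafe otherwise.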
vectDescripteur=eval(vectDescripteur)
X.append(vectDescripteur)
print(row[0])
if row[0]=='car':
Y.append("car")
else:
Y.append("ship")
cl=svm.LinearSVC()
cl.fit(X,Y)
a=glob.glob('DataToPredict/*.jpg')
cd = ColorDescriptor((8, 12, 3))
results = []
for f in a:
image=cv2.imread(f)
features = cd.describe(image)
print(f, cl.predict([features]))
    results.append({'image': f, 'predict': cl.predict([features])})
img = imageio.imread(f)
plt.imshow(img)
plt.show()
for row in results:
print(row['image'].split('\\')[1])
print(row['predict'])
csv_filename = 'Prediction_Atelier2_corelDB_Imade.csv'
with open(csv_filename, 'a', newline='') as csvfile:
fieldnames = ['Name', 'Classe']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
    for row in results:
filename = row['image'].split('\\')[1]
classname = row['predict']
writer.writerow({'Name' : filename, 'Classe' : classname})
| UTF-8 | Python | false | false | 1,677 | py | 37 | classifier.py | 9 | 0.543232 | 0.519976 | 0 | 64 | 25.1875 | 70 |
Tulki/advent | 15,814,069,615,037 | 254798f2f25425a2a6402235ed0fb5266d761fe7 | 441a815c0f47f48b9cb90ef490fe6e9f606bc492 | /2020/day13.py | 710df8bf5da381219f04222e54c1ecf3959484ec | []
| no_license | https://github.com/Tulki/advent | dcd0532de38b37789fb8d588768ed1845ce85ea6 | d054c5f4d26a124aa5565628d18d0dcab50a7b7b | refs/heads/master | 2022-12-15T12:56:16.359510 | 2022-12-07T04:49:10 | 2022-12-07T04:49:10 | 75,567,678 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from shared import *
def waitTime(earliestTime, busID):
rem = earliestTime % busID
if rem > 0:
return busID - rem
return rem
# Part A
def solveA():
schedule = split_lines('day13.input')
    earliestTime = int(schedule[0])
    busIDs = [int(x) for x in schedule[1].split(',') if x != 'x']
waitTimes = [waitTime(earliestTime, busID) for busID in busIDs]
minWait = min(waitTimes)
minBusID = busIDs[waitTimes.index(minWait)]
return minWait * minBusID
# Part B
def checkOffset(time, busID, offset):
if busID == 'x':
return True
return (time + offset) % busID == 0
def checkOffsets(time, busIDs):
correctOffsets = 0
for i in range(len(busIDs)):
check = checkOffset(time, busIDs[i], i)
if not check:
# Short circuiting will save us time.
return correctOffsets
else:
correctOffsets += 1
return correctOffsets
def computeLCM(busIDs, correctOffsetCount):
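    # The bus IDs in this puzzle are prime, so the plain product of the
    # matched IDs equals their least common multiple.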
correctIDs = busIDs[:correctOffsetCount]
correctIDs = [ID for ID in correctIDs if ID != 'x']
LCM = 1
for ID in correctIDs:
LCM *= ID
return LCM
def solveB():
schedule = split_lines('day13.input')
    busIDs = ['x' if busID == 'x' else int(busID) for busID in schedule[1].split(',')]
# Skipping timestamps based on the LCM of checked busIDs will save us time.
LCM = 1
time = 0
correctOffsets = 0
while correctOffsets < len(busIDs):
time += LCM
print(time)
correctOffsets = checkOffsets(time, busIDs)
newLCM = computeLCM(busIDs, correctOffsets)
if newLCM > LCM:
LCM = newLCM
return time
| UTF-8 | Python | false | false | 1,697 | py | 31 | day13.py | 31 | 0.610489 | 0.60165 | 0 | 65 | 25.107692 | 88 |
williamespander/NUMA01 | 13,572,096,677,476 | 5491edc48b697b52e531956fc7ccbe6283d79cf2 | ffa36d8483ace18676a24f54974767325c52012a | /FINALBirds.py | 7a43d1f1b733071d20903587cc13cb93bbbca4e4 | []
| no_license | https://github.com/williamespander/NUMA01 | c3fdd0f32efafe7f5129b45e1a43f09d318b69bd | a0a293a36c6cdc3584a0df9aca21b06dc400d34e | refs/heads/main | 2023-05-10T14:16:09.134741 | 2021-05-25T20:33:15 | 2021-05-25T20:33:15 | 366,098,637 | 0 | 1 | null | false | 2021-05-13T21:26:59 | 2021-05-10T15:56:57 | 2021-05-12T17:42:30 | 2021-05-13T21:26:58 | 7,708 | 0 | 1 | 0 | Python | false | false | # -*- coding: utf-8 -*-
"""
Created on Tue May 25 15:35:54 2021
@author: claud
"""
import datetime
import pytz
import numpy
import os
from matplotlib.pyplot import *
from astral import *
# ASTRAL CAN BE USED FOR SUNRISE/SUNSET TIMES.
class birds:
'''
Class processes a datafile which registers in- and out-movements of
a bird at a nesting box. The data in the file contains lines of the
type:
2015-03-01 14:22:05.911302 2072
with the date, the time in UTC and the total number of registered
movements so far.
'''
def __init__(self, textfile):
self.data = []
# Opening file and appending list "self.data" with lists that
# contain the date in datetime format and the number of movements
# in int data type. Datetime objects are converted from
# UTC timezone to CET timezone. The instantiation raises an
# exception on one line in which the data was corrupted.
# ISSUE: I JUST TRIED TO MAKE THIS LOOK A BIT PRETTIER BUT IT
# DOESN'T WORK FOR WHATEVER REASON. LESS PRETTY, BUT
# FUNCTIONAL CODE BELOW.
# with open(textfile, "r") as txt:
# for line in txt:
# try:
# self.data.append
# ([pytz.utc.localize(datetime.datetime.strptime
# (line.split(sep=" ", maxsplit=1)
# [0], '%Y-%m-%d %H:%M:%S.%f'))
# .astimezone(pytz.timezone('Europe/Stockholm'))
# ,int(line.split(sep=" ", maxsplit=1)[1])])
# except ValueError:
# pass
with open(textfile, "r") as txt:
# Source: Google Maps
for line in txt:
try:
self.data.append([pytz.utc.localize(
datetime.datetime.strptime(line.split(sep=" ",
maxsplit=1)[0]
,'%Y-%m-%d %H:%M:%S.%f'))
.astimezone(pytz.timezone('Europe/Stockholm')),
int(line.split(sep=" ", maxsplit=1)[1])
])
except ValueError:
pass
def preprocess(self):
'''
        Method preprocesses the datafile. Data corruption where counts are
        reported incompletely is repaired. If lines are missing, they are
        added. If there are more than 4 movements registered per minute,
        they are changed to 0.
PARAMETERS
self: (.txt file)
A textfile with dates, times and numbers that correspond to
movements of Birds. Datetime must be in format
'%Y-%m-%d %H:%M:%S.%f' and the corresponding integer should
be separated by spaces. Each datapoint has to be separated by
a new line.
RETURNS
-------
None.
'''
# Sorting the data chronologically
self.data.sort(key=lambda x: x[0])
# If the items that come before or after a certain index in the
# list of number of registered movements are the same but the
# middle value is different, the middle value is changed to match
# the previous value. IndexErrors are passed.
for i in range(len(self.data)):
try:
if (self.data[i-1][1] == self.data[i+1][1] and
self. data[i][1] != self.data[i-1][1]):
self.data[i][1] = self.data[i-1][1]
except IndexError:
pass
# If the absolute value of the difference between the next and the
# current item in the list is less than or equal to 8, the current
# item takes on the value of the difference. Otherwise, the
# current value is set to 0. IndexErrors are passed.
# ISSUE: THE CURRENT CODE ASSIGNS THE VALUE TO THE EARLIER TIME
# BUT IT WOULD BE MORE INTUITIVE IF THE VALUE WAS ASSIGNED
# TO THE LATER TIME. WE SHOULD ALSO CONSIDER CHANGING THE
# VALUES TO 8 INSTEAD OF 0 IF THE NUMBER IS PRETTY CLOSE TO
# 8.
for i in range(len(self.data)):
maxmv = 20
try:
if (self.data[i+1][1] - self.data[i][1] <= maxmv and
self.data[i+1][1] - self.data[i][1] >= 0):
self.data[i][1] = abs(self.data[i+1][1] -
self.data[i][1])
else:
if (self.data[i+1][1] - self.data[i][1] > maxmv):
self.data[i][1] = maxmv
else:
self.data[i][1] = 0
except IndexError:
self.data[i][1] = 0
# Removes and adds lines based on timedeltas which are too low or
# high.
# ISSUE: FOR SOME REASON NO ITEMS ARE DELETED WHILE 20-30K ITEMS
# ARE ADDED IN THE LATTER PART.
i = 0
while (i <= len(self.data)):
try:
if (self.data[i][0] + datetime.timedelta
(minutes=0, seconds=0) > self.data[i+1][0]):
# if (self.data[i][1] != 0):
# pass
# else:
del self.data[i+1]
elif (self.data[i][0] + datetime.timedelta(minutes=3) <
self.data[i+1][0]):
self.data.insert(i + 1, [self.data[i][0] +
datetime.timedelta(minutes=2)
, self.data[i][1]])
except IndexError:
pass
i += 1
Latitude = 55.72
Longitude = 13.35
Dawn = []
Dusk = []
daytime = Astral()
i = 0
while (i < len(self.data)+1):
try:
DATA = daytime.sun_utc(date = (self.data[i][0])
.astimezone(
pytz.timezone(
'Europe/Stockholm')),
latitude = 55.72,
longitude=13.35)
Dawn.append(DATA['dawn'])
Dusk.append(DATA['dusk'])
except:
pass
i+=1
Color = np.zeros([len(self.data)])
T = [self.data[i][0] for i in range(len(self.data))]
for i in range(len(self.data)):
try:
if Dawn[i]<T[i] and T[i]<Dusk[i]:
Color[i] = 1 #Day
else:
Color[i] = 0 #Night
except Exception as E:
pass
self.Color = Color
# Plots a graph within a given interval.
def plot(self, startdate, days):
'''
Method plots movements in the bird nest within a given time
interval and outputs the plot.
PARAMETERS
self: (.txt file)
A textfile with dates, times and numbers that correspond to
movements of Birds. Datetime must be in format
'%Y-%m-%d %H:%M:%S.%f' and the corresponding integer should
be separated by spaces. Each datapoint has to be separated by
a new line.
startdate: (datetime.datetime object)
A datetime object that sets the starting date for the plot.
days: (datetime.timedelta object)
A timedelta object that sets the end date for the plot as the
number of days from the startdate.
RETURNS
-------
None.
'''
# Finds the list index of the startdate in self.data.
llimit = 0
ulimit = len(self.data)
while (True):
date = int((llimit + ulimit) / 2)
if (date == 0):
startIndex = 0
break
if (date == ulimit - 1):
startIndex = ulimit - 1
break
if (self.data[date - 1][0] < startdate
< self.data[date + 1][0]):
startIndex = date
break
elif startdate < self.data[date][0]:
ulimit = date
elif self.data[date][0] < startdate:
llimit = date
# The enddate is set to the startdate + the number of days.
endDate = self.data[startIndex][0] + days
# Finds the list index of the enddate.
endIndex = startIndex
while self.data[endIndex][0] < endDate:
endIndex += 1
# Sets x-values to matplotlib.dates.date2num datatype and creates
# x-axis.
x_values = [matplotlib.dates.date2num(self.data[i][0]) for i in
range(startIndex, endIndex)]
# Sets y-values to number of movements and matches dates.
y_values = [self.data[i][1] for i in range(startIndex - 1,
endIndex - 1)]
matplotlib.pyplot.plot_date(x_values, y_values, label="Movement"
, marker=".", markersize="5", linewidth=3, linestyle='-')
if days <= datetime.timedelta(days = 30):
Colors = self.Color[startIndex-1 : endIndex-1]
for i in range(len(Colors)-1):
if Colors[i] == 1:
matplotlib.pyplot.axvspan(xmin = x_values[i], xmax =
x_values[i+1] ,
ymax = max(y_values),
color = 'red', alpha = 0.1)
else:
pass
else:
print("Won't print day/night for intervals greater than "
"30 days.")
title("."); legend(); xticks(rotation=45); xlabel(" ");
ylabel(" ")
show()
def UI(self):
'''
Method starts a UI that prompts the user for input for the plots.
The method finally calls the "plot" method and outputs a plot.
PARAMETERS
self: (.txt file)
A textfile with dates, times and numbers that correspond to
movements of Birds. Datetime must be in format
'%Y-%m-%d %H:%M:%S.%f' and the corresponding integer should
be separated by spaces. Each datapoint has to be separated by
a new line.
Returns
-------
None.
'''
# Prompts the user for a start date.
while True:
promptStart = input("Please enter a starting date.")
try:
if len(promptStart) == 19:
promptStart = promptStart + ".000000"
elif len(promptStart) == 10:
promptStart = promptStart + " 00:00:00.000000"
promptStart = pytz.utc.localize(datetime.datetime.strptime
(promptStart,'%Y-%m-%d %H:%M:%S.%f')
).astimezone(pytz.timezone(
'Europe/Stockholm'))
except ValueError:
print("Dates must be in format: YYYY-MM-DD HH:MM:SS")
continue
if (promptStart < self.data[0][0] or
self.data[-1][0] < promptStart):
print("Startdate must be between 2015-01-25 14:05:41.274647 "
+ "and 2016-01-16 17:22:10.171150")
continue
else:
break
#Prompts the user for number of days.
while True:
try:
promptDays = int(input("How many days after the start date"
+ " do you wish to plot?"))
except ValueError:
print("You must enter a number.")
continue
if promptDays <= 0:
print("You must enter a positive value.")
continue
promptDays = datetime.timedelta(days = promptDays)
if ((self.data[-1][0]) < (promptStart + promptDays)):
print("The file doesn't measure beyond 2016-01-16 " +
"17:22:10.171150. Plot will only include dates up to" +
" this point.")
continue
else:
break
self.plot(promptStart, promptDays)
birdsData = birds(r"C:\Users\willi\Documents\GitHub\NUMA01\bird_jan25jan16.txt")
birdsData.preprocess()
birdsData.UI()
# places where the timestamps go backwards in time
#2016-01-12 22:18:05.547619 0
#2015-08-02 07:00:08.274214 24798
#2015-08-02 06:18:17.747048 0
#BirdsData.Step1()
#print(BirdsData.data[0:12500])
#datumet: 2015, 2, 9, 8, 10
#2015-02-09 07:08:05.410445 802
#2015-02-09 07:10:05.219535 02
#2015-02-09 07:12:05.254474 802
#238746
| UTF-8 | Python | false | false | 13,643 | py | 7 | FINALBirds.py | 7 | 0.46653 | 0.435956 | 0 | 346 | 37.419075 | 93 |
biddyweb/icruits | 3,229,815,454,678 | 84d007cfab5e9cf8ffe5fd1d0209d3d3a106dba2 | 4adc69ccec096285af3882ad659cbd13eaa273f4 | /libs/djadmin/models.py | 32bbd2fa7928e68acdd8f0fd1fda156a1b765c5f | []
| no_license | https://github.com/biddyweb/icruits | 8d2b56e4e9c00641f5af0885732788e1ae29e658 | eb72e4e89f153dd9d076e84f9c522a582a5e167f | refs/heads/master | 2020-03-28T07:41:35.085813 | 2018-05-26T19:03:37 | 2018-05-26T19:03:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from django.db import models
from django.utils.translation import ugettext as _
LIST_PAGE, FORM_PAGE, = 0, 1
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Visitor(models.Model):
name = models.ForeignKey(AUTH_USER_MODEL, null=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=50, null=True)
country = models.CharField(max_length=50, null=True)
visit_datetime = models.DateTimeField(auto_now=True)
browser = models.CharField(max_length=30, null=True)
browser_version = models.CharField(max_length=20, null=True)
ipaddress = models.CharField(max_length=20, null=True)
os_info = models.CharField(max_length=30, null=True)
os_info_version = models.CharField(max_length=20, null=True)
device_type = models.CharField(max_length=20,
null=True)
device_name = models.CharField(max_length=20, null=True)
device_name_brand = models.CharField(max_length=20, null=True)
device_name_model = models.CharField(max_length=20, null=True)
unique_computer_processor = models.CharField(max_length=255, null=True)
class Meta:
ordering = ['visit_datetime']
verbose_name = _("visitor")
verbose_name_plural = _("visitors")
def __str__(self):
return self.os_info
def __unicode__(self):
return self.os_info
class DjadminField(models.Model):
name = models.CharField(_('Field Name'), max_length=255)
type = models.CharField(_('Field Type'), max_length=30)
model = models.CharField(_('Model Name'), max_length=50)
depth = models.IntegerField(_('Field Depth'))
foreignkey_model = models.CharField(_('Foreign Key Model Name'), max_length=50, null=True, blank=True)
class Meta:
db_table = 'djadmin_field'
verbose_name = _('Djadmin Field')
verbose_name_plural = _('Djadmin Fields')
def __str__(self):
return self.name
class DjadminModelSetting(models.Model):
model = models.CharField(_('Model Name'), max_length=50)
app_label = models.CharField(_('Model App Label Name'), max_length=50)
list_display = models.ManyToManyField(DjadminField, verbose_name="List Display", related_name='list_display',
help_text="Set list_display to control which fields are displayed on the change list page of the admin."
, blank=True)
list_display_links = models.ManyToManyField(DjadminField, verbose_name="List Display Link",
related_name='list_display_links', blank=True,
help_text="Use list_display_links to control if and which fields in list_display should be linked to the change list page for an object.")
list_filter = models.ManyToManyField(DjadminField, verbose_name="List Filter", related_name='list_filter',
blank=True,
help_text="Set list_filter to activate filters in the right sidebar of the change list page of the admin")
list_per_page = models.IntegerField(_('List Per Page'), null=True, blank=True,
help_text="Set list_per_page to control how many items appear on each paginated admin change list page.")
list_max_show_all = models.IntegerField(_('List Max Show All'), null=True, blank=True,
help_text="Set list_max_show_all to control how many items can appear on a 'Show all' admin change list page.")
list_editable = models.ManyToManyField(DjadminField, verbose_name="List Editable", related_name='list_editable',
blank=True,
help_text="Set list_editable to a list of field names on the model which will allow editing on the change list page.")
search_fields = models.ManyToManyField(DjadminField, verbose_name="Search Fields", related_name='search_fields',
blank=True,
help_text="Set search_fields to enable a search box on the admin change list page.")
date_hierarchy = models.ForeignKey(DjadminField, related_name='date_hierarchy', blank=True, null=True,
help_text="Set date_hierarchy to the name of a DateField or DateTimeField in your model, and the change list page will include a date-based drilldown navigation by that field.")
class Meta:
db_table = 'djadmin_model_setting'
ordering = ['model']
verbose_name = _('Djadmin Model Setting')
verbose_name_plural = _('Djadmin Model Settings')
def __unicode__(self):
return self.model
class DjadminCard(models.Model):
LOCATION_CHOICES = (
(LIST_PAGE, 'LIST PAGE'),
(FORM_PAGE, 'FORM PAGE'),
)
model = models.ForeignKey(DjadminModelSetting)
name = models.CharField(_('Name of Card'), max_length=255)
html = models.TextField(_('HTML Code'))
location = models.SmallIntegerField(_('Select Location'), choices=LOCATION_CHOICES,
help_text='It will help to show this card on selected location for this model')
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['date_created']
verbose_name = _('Djadmin Card')
verbose_name_plural = _('Djadmin Cards')
def __str__(self):
return self.name
| UTF-8 | Python | false | false | 5,734 | py | 93 | models.py | 62 | 0.61301 | 0.604988 | 0 | 108 | 51.092593 | 216 |
TaiSakuma/AlphaTwirl | 16,630,113,412,878 | e5d278de49681218a37f3859b8900002119007f7 | 84b04da7303dda43ae9bbf17110c7dfd8b3d77b8 | /tests/unit/summary/test_Count.py | a1e44d7c9146de82f7b9ceff7898b6be64ec7fcb | [
"BSD-3-Clause"
]
| permissive | https://github.com/TaiSakuma/AlphaTwirl | 7ca4528853d7bc5a9450da507c7b43799b6d1823 | 94d08b19098ecbf1d90a022cd4ad9ba63edb3c06 | refs/heads/master | 2021-06-12T13:43:10.422029 | 2019-05-12T14:30:27 | 2019-05-12T14:30:27 | 30,841,569 | 1 | 5 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Tai Sakuma <tai.sakuma@gmail.com>
import numpy as np
import copy
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.summary import Count
##__________________________________________________________________||
@pytest.mark.parametrize('kwargs, expected_contents', (
(dict(), [np.array([0, 0])]),
(dict(val=()), [np.array([1, 1])]),
(dict(val=(), weight=10), [np.array([10, 100])]),
(dict(contents=[np.array([1, 3])]), [np.array([1, 3])]),
))
def test_init(kwargs, expected_contents):
obj = Count(**kwargs)
np.testing.assert_equal(expected_contents, obj.contents)
##__________________________________________________________________||
def test_repr():
obj = Count()
repr(obj)
##__________________________________________________________________||
def test_add():
obj1 = Count(contents=[np.array((10, 20))])
obj2 = Count(contents=[np.array((30, 40))])
obj3 = obj1 + obj2
np.testing.assert_equal([np.array([40, 60])], obj3.contents)
assert obj1 is not obj3
assert obj1.contents is not obj3.contents
assert obj2 is not obj3
assert obj2.contents is not obj3.contents
def test_radd():
obj1 = Count(contents=[np.array((10, 20))])
assert obj1 is not sum([obj1]) # will call 0 + obj1
assert obj1 == sum([obj1])
def test_radd_raise():
obj1 = Count(contents=[np.array((10, 20))])
with pytest.raises(TypeError):
1 + obj1
def test_copy():
obj1 = Count(contents=[np.array((10, 20))])
copy1 = copy.copy(obj1)
assert obj1 == copy1
assert obj1 is not copy1
assert obj1.contents is not copy1.contents
assert obj1.contents[0] is not copy1.contents[0]
##__________________________________________________________________||
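# Hedged aside (mirrors the parametrized cases above): each call stores
# [sum(weights), sum(weights**2)], so Count(val=(), weight=3).contents
# is [np.array([3, 9])] and adding two Counts adds contents element-wise.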
| UTF-8 | Python | false | false | 1,783 | py | 241 | test_Count.py | 188 | 0.536736 | 0.494111 | 0 | 59 | 29.220339 | 70 |
davis-berlind/STA-561-Final-Project | 1,546,188,237,915 | b2664ddb28942a7aff0ef279cef8307336905b76 | bda0f571c41fabced968080c2a31555182936677 | /monthly_data.py | 9ffac7ca92cf357d7195c6c10f3cc82b70d51c40 | []
| no_license | https://github.com/davis-berlind/STA-561-Final-Project | 8e351c5181dc3328a3c6741a2760d0f821578025 | 7536fa67ba2a5f871e6244ca6f38928c2e5a8686 | refs/heads/master | 2020-05-03T05:30:35.573062 | 2019-04-27T05:32:55 | 2019-04-27T05:32:55 | 178,449,109 | 1 | 2 | null | false | 2019-04-12T16:29:30 | 2019-03-29T17:28:33 | 2019-04-12T13:50:40 | 2019-04-12T16:29:30 | 1,569 | 1 | 2 | 0 | Jupyter Notebook | false | false | from fred import Fred
import numpy as np
import pandas as pd
from datetime import datetime
import time
from quarterize import quarterMean, growthRate
from dateutil.relativedelta import relativedelta
# FRED API key: fred_api_key.py is expected to define `key`
# (originally loaded with the IPython magic `%run -i fred_api_key`,
# which is a syntax error in a plain Python script)
from fred_api_key import key
## Getting Data from FRED ##
# loading keys
fseries = pd.read_csv('FRED_data_series.csv')
data = pd.read_csv('data.csv')
# Intialize Fred object for communicating with FRED API
fr = Fred(api_key = key, response_type = 'df')
start = datetime(1990, 1, 1)
end = datetime(2018, 10, 2)
params = {'observation_start': start.strftime('%Y-%m-%d'),
'observation_end': end.strftime('%Y-%m-%d')}
dates = []
while start <= end:
dates.append(start.strftime('%Y-%m-%d'))
start += relativedelta(months = 1)
# Retrieving data from Fred
# number of series to collect
n = fseries.shape[0]
# initialize data frame for data collection
monthly_data = pd.DataFrame({'date' : dates[1:]})
# loop through and collect data for each code in fseries
i = 0
for col in data.columns[1:-4]:
method = int(fseries.loc[fseries.file == col, 'method'])
freq = fr.series.details(col).frequency_short[0]
series = fr.series.observations(col, params = params)
if ((freq == 'M') & bool(method)):
monthly_data[col] = np.diff(series.value)
elif(freq == 'M'):
monthly_data[col] = growthRate(series.value + 0.0001)
elif(bool(method)):
series.dropna(axis = 0, inplace = True)
ret = []
for start, end in zip(dates[:-1], dates[1:]):
# get chunk of data between start and end dates
batch = series.loc[(series.date >= start) & (series.date < end), 'value']
            # accumulate the within-period net change
shrink = np.sum(np.diff(batch))
ret.append(shrink)
monthly_data[col] = ret
else:
series.dropna(axis = 0, inplace = True)
series = pd.DataFrame({'date' : series.date[1:], 'value': growthRate(series.value + 0.0001)})
ret = []
for start, end in zip(dates[:-1], dates[1:]):
# get chunk of data between start and end dates
batch = series.loc[(series.date >= start) & (series.date < end), 'value']
            # compound the within-period growth rates
shrink = np.prod(1 + batch) - 1
ret.append(shrink)
monthly_data[col] = ret
i+=1
if((i % 100) == 0): print(i)
# time buffer to prevent 429 server error
time.sleep(.5)
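
# Hedged aside (illustrative numbers, matching the compounding above):
# three monthly growth rates of 1% compound to (1.01 ** 3) - 1 = 0.030301.
assert abs(np.prod(1 + np.array([0.01] * 3)) - 1 - 0.030301) < 1e-9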
# adding in zero-indexed data
data['MEIM683SFRBCHI'] = fr.series.observations('MEIM683SFRBCHI', params = params).value[1:]
data['RMEIM683SFRBCHI'] = fr.series.observations('RMEIM683SFRBCHI', params = params).value[1:]
# adding lagged GDP
lag1 = {'observation_start': "1989-10-01",
'observation_end': "2018-07-01"}
lag2 = {'observation_start': "1989-07-01",
'observation_end': "2018-04-01"}
| UTF-8 | Python | false | false | 2,716 | py | 17 | monthly_data.py | 7 | 0.67268 | 0.637334 | 0 | 91 | 28.846154 | 96 |
hacksman/learn_python | 2,078,764,202,454 | 3de2858bbff86e23a322cf84d5c624d14fbfc5df | 787b1afb2a502ae1eeb9454f8ae0df3f5490e8d2 | /supervisor_foo/__init__.py | 74e2f4bfb402edc9cc21d67d2d6c2da0bb21bad6 | []
| no_license | https://github.com/hacksman/learn_python | 204f2495116845de40ad48af444ab27273c2d08d | 4090d8b389e9b812b0f2fa1e5328f822d4cc864b | refs/heads/master | 2020-03-30T06:18:11.473822 | 2018-12-06T06:45:54 | 2018-12-06T06:45:54 | 150,850,695 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
The demo only runs in Python 2.
""" | UTF-8 | Python | false | false | 39 | py | 44 | __init__.py | 44 | 0.589744 | 0.564103 | 0 | 3 | 11.666667 | 29 |
AlgorithmicAmoeba/gpu_se | 1,786,706,427,304 | e86f92c93f0ce69724327f8e08c425dfce841380 | bf07c77c6618a146631b95064b1aa0862df9e8dc | /tests/inputter.py | 079557c5097ca4a707ab8c3ddefb9f90099c7b97 | []
| no_license | https://github.com/AlgorithmicAmoeba/gpu_se | 934bd9e36fd4831cd9b5edb160d6a3287ab4883c | 18ff5dd1a245388eb0b3ee28f0b562259aed22ba | refs/heads/master | 2022-12-31T16:36:54.076664 | 2020-10-27T11:46:03 | 2020-10-27T11:46:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Inputs:
"""Creates fake inputs for the glucose feed from past data
"""
def __call__(self, t):
t_batch = 30
Cn_in = 0.625 * 10 / 60 # (g/L) / (g/mol) = mol/L
if t < t_batch + 66:
CgFg = 0.151612826257827
elif t < t_batch + 101:
CgFg = 0.21241923938674
elif t < t_batch + 137:
CgFg = 0.283225652515653
else:
CgFg = 0.354032065644566
Cg_in = 314.19206 / 180 # (g/L) / (g/mol) = mol/L
Ca_in = 10 # mol/L
Cb_in = 10 # mol/L
Fm_in = 0
if t < t_batch:
Fg_in = 0
Fa_in = 0
Fb_in = 0
F_out = 0
else:
Fg_in = CgFg / 180 / Cg_in # (g/h) / (g/mol) / (mol/L) = L/h
Fn_in = 0.625 / 1000 / Cn_in / 60 # (mg/h) / (mg/g) / (mol/L) / (g/mol) = L/h
Fa_in = 0
Fb_in = 6e-9 # L/h
F_out = Fg_in + Fn_in + Fb_in + Fm_in
T_amb = 25
Q = 5 / 9
return Fg_in, Cg_in, Fa_in, Ca_in, Fb_in, Cb_in, Fm_in, F_out, T_amb, Q
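
# Hedged usage sketch (assumed time points; not part of the original file):
if __name__ == '__main__':
    u = Inputs()
    print(u(10.0))   # batch phase (t < 30 h): all feed streams are zero
    print(u(100.0))  # fed-batch phase: glucose feed stepped up with time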
| UTF-8 | Python | false | false | 1,099 | py | 53 | inputter.py | 41 | 0.410373 | 0.300273 | 0 | 38 | 27.868421 | 90 |
isemiguk/Python_Coursera | 2,662,879,767,590 | 768835344ba905d152f18d86619f0f47cadc596a | 71c47b4c8f35279c1f875c499ecae3c2eaf65432 | /Module_3/homework/productPrice.py | b974cdfff2b744af78f291e3391b8f62411a7da1 | []
| no_license | https://github.com/isemiguk/Python_Coursera | e53ead7ed966a0b11a9c43667caa00f9c4504328 | ce7d5904a91dfadd57d9a2bb4fb413d4c23c7a3c | refs/heads/master | 2022-11-06T13:56:50.926356 | 2020-07-01T19:12:22 | 2020-07-01T19:12:22 | 266,181,204 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
n = float(input())
m = (n - int(n)) * 100
print(int(n), round(m), sep=' ')
| UTF-8 | Python | false | false | 89 | py | 61 | productPrice.py | 61 | 0.539326 | 0.505618 | 0 | 6 | 13.833333 | 32 |
Visanpy-Tech/plotly-factory | 309,237,649,263 | 4ed3ceacd7579263860d139b8b9a4c838e6962f3 | 79887701ad864f98574d9a566dd40c0f12457a27 | /.ipynb_checkpoints/plotly_factory-checkpoint.py | cd1f16f9ea8ec817e306ebcdeb00e330b10dd625 | []
| no_license | https://github.com/Visanpy-Tech/plotly-factory | be0b117c318310836a06602cc677c734ddb27bd6 | 34a1ca55a43cca562c97b890f5d516f5feeb0019 | refs/heads/master | 2020-12-20T06:51:12.482662 | 2020-01-15T12:04:27 | 2020-01-15T12:04:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import plotly as pl
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from collections import OrderedDict, defaultdict
from datetime import datetime
from plotly.subplots import make_subplots
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from scipy import stats
def axis_layout(title=None, title_size=16, title_color="#666666", grid_color="#F0F0F0", show_grid=False,
tick_angle=0, tick_family="Times New Roman", tick_size=14, tick_color="#4d4d4d", ticks="",
show_exponent=None, exponent_format=None, range_=None, dtick=None, showticklabels=True,
type_=None, fixedrange=True):
axis_dict = dict(
title=dict(
text=title,
font=dict(
size=title_size,
color=title_color
)
),
showgrid=show_grid,
gridcolor=grid_color,
dtick=dtick,
tickfont=dict(
family=tick_family,
size=tick_size,
color=tick_color
),
tickangle=tick_angle,
range=range_,
ticks=ticks,
showticklabels=showticklabels,
type=type_,
fixedrange=fixedrange,
showexponent = show_exponent,
exponentformat = exponent_format
)
return axis_dict
def title_layout(title, title_size=21, x_position=0.5, y_position=0.9, color="#A8A8A8", family="Times New Roman"):
title_ = dict(
text=title,
x=x_position,
y=y_position,
font=dict(
size=title_size,
color=color,
family=family
)
)
return title_
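
# Hedged usage sketch: both helpers above return plain dicts, so they drop
# straight into plotly's update_layout, e.g.
# fig.update_layout(title=title_layout("Demo"), xaxis=axis_layout(title="x"))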
def plot_hue(df, main_bars, sub_bars, transparent=True):
d_dict = defaultdict(list)
grp = df.groupby(by=[sub_bars, main_bars]).count()
for (main, sub), val in zip(grp.index, grp.values):
d_dict[main].append({sub: val[0]})
fig = go.Figure()
subbars_count = df.groupby(by=sub_bars).count()
subbars = subbars_count.sort_values(by=df.columns[-1]).index
for sub in list(subbars)[::-1]:
values = [list(i.items())[0] for i in d_dict[sub]]
values = sorted(values, key=lambda x: x[1], reverse=True)
x = [i[0] for i in values]
y = [i[1] for i in values]
trace = go.Bar(
x=x,
y=y,
name=sub,
text=sub
)
fig.add_trace(trace)
if transparent:
fig.update_layout(paper_bgcolor="rgba(0,0,0,0)", plot_bgcolor="rgba(0,0,0,0)")
return fig
def plot_box(df, y, x_main, main_categories, x_sub=None, sub_categories=None, orientation="v",
notched=True, legend="default", mean=True, points="outliers", transparent=True):
fig = go.Figure()
colors = ["#191970", "#64b5f6", "#ef6c00", "#ffd54f"]
legend_font = {}
if legend == "default":
legend_font.update(
dict(bgcolor="rgba(0,0,0,0)",
font=dict(size=18, family="Times New Roman")
)
)
if x_sub is None:
x_ = df.loc[df[x_main].isin(main_categories)][x_main]
y_ = df.loc[df[x_main].isin(main_categories)][y]
trace = go.Box(
x=x_,
y=y_,
marker=dict(
size=5,
opacity=0.6,
color=colors[1]
),
boxmean=mean,
boxpoints=points,
orientation=orientation,
opacity=0.9,
notched=notched,
)
fig.add_trace(trace)
else:
for k, sub in enumerate(sub_categories):
x_ = df.loc[df[x_main].isin(main_categories) & (df[x_sub] == sub)][x_main]
y_ = df.loc[df[x_main].isin(main_categories) & (df[x_sub] == sub)][y]
trace = go.Box(
x=x_,
y=y_,
name=sub,
marker=dict(
size=5,
opacity=0.6,
color=colors[k]
),
boxmean=mean,
boxpoints=points,
orientation=orientation,
opacity=0.9,
notched=notched,
)
fig.add_trace(trace)
fig.update_layout(
{"boxmode":"group",
"legend":legend_font
}
)
if transparent:
fig.update_layout(paper_bgcolor="rgba(0,0,0,0)", plot_bgcolor="rgba(0,0,0,0)")
return fig
def plot_horizontal_count_bars(df, column, first_n="all", colorscale="algae", show_percentage=False, show_text=False,
text_font="default", text_position="inside", text_percentage_space=" ", round_percentage_decimals=1,
transparent=True):
# font for text and percentage
if text_font=="default":
text_font= dict(
family="Time New Roman",
size=16,
color="#000080"
)
# count and sort entries in a given column
total_counts = df[column].value_counts().sort_values(ascending=False)
if first_n == "all":
        # we use [::-1] in order to have descending values from top to bottom
counts = total_counts[::-1]
else:
counts = total_counts[:first_n][::-1]
y = counts.index # entries
x = counts.values # number of entries in a given column
# make bar chart
trace = go.Bar(
x=x,
y=y,
marker=dict(
color=counts,
colorscale=colorscale,
opacity=0.7
),
orientation="h",
hoverinfo="x + y"
)
# calculate percentages
if show_text:
if show_percentage:
counts_normalized = x / sum(total_counts)
percentage = np.round(100 * counts_normalized, round_percentage_decimals)
text = ["<b>" + entry + text_percentage_space +
str(percent) + " %" + "</b>" for entry, percent in zip(y, percentage)]
else:
text = ["<b>" + y_ + "</b>" for y_ in y]
elif show_percentage:
counts_normalized = x / sum(total_counts)
percentage = np.round(100 * counts_normalized, round_percentage_decimals)
text = ["<b>" + entry + text_percentage_space +
str(percent) + " %" + "</b>" for entry, percent in zip(y, percentage)]
else:
text = ["" for y_ in y]
# update trace with text
trace.update(dict(
text=text,
textposition=text_position,
textfont=text_font)
)
fig = go.Figure(data=[trace])
if transparent:
fig.update_layout(paper_bgcolor="rgba(0,0,0,0)", plot_bgcolor="rgba(0,0,0,0)")
return fig
def plot_subplots(df, main_category, sub_category, layout, n_rows, n_cols, n_bars,
colorscale="mint", vertical_spacing=0.2, share_x=False, share_y=False):
main_counts = df[main_category].value_counts()
sub_counts = df[sub_category].value_counts()
counts_dict = OrderedDict()
for main in main_counts.index:
counts = df.loc[df[main_category] == main][sub_category].value_counts()
subs = counts.index
subs_freq = counts.values
for sub, freq in zip(subs, subs_freq):
if main not in counts_dict.keys():
counts_dict.update({main: [[sub, freq]]})
else:
counts_dict[main].append([sub, freq])
fig = pl.subplots.make_subplots(
rows=n_rows,
cols=n_cols,
subplot_titles=list(main_counts.keys())[:n_cols * n_rows],
shared_yaxes=share_y,
shared_xaxes=share_x,
vertical_spacing=vertical_spacing)
for i, main in enumerate(list(counts_dict.keys())[:n_rows * n_cols]):
x = np.array(counts_dict[main])[:n_bars, 0]
y = np.array(counts_dict[main])[:n_bars, 1]
trace = go.Bar(
x=x,
y=y,
marker=dict(
color=sub_counts,
colorscale=colorscale),
hoverinfo="x + y",
)
fig.add_trace(trace, row=i // 2 + 1, col=i % 2 + 1)
if i > n_cols * n_rows - 2:
break
fig.update_layout(layout)
return fig
def plot_histograms(df, main_column, main_categories, sub_column=None, sub_categories=None, show_box=False,
                x_legend=0.84, y_legend=0.7, legend="default", percentage=False, points=False, notched=True, mean=True, percentage_relative_to="sub_category", sort_values="initial_sort", transparent=True):
legend_font = {"x": x_legend, "y":y_legend}
if legend == "default":
legend_font.update(
{
"bgcolor":"rgba(0,0,0,0)",
"font":dict(size=18, family="Times New Roman")
}
)
colors = ["#191970", "#64b5f6", "#ef6c00", "#ffd54f"]
# a trick to generate bar charts only for main category
if sub_column is None:
df["trick_column"] = len(df)*["trick"]
sub_categories = ["trick"]
sub_column="trick_column"
showlegend=False
domain_1 =[0.90, 1]
domain_2 =[0, 0.90]
else:
showlegend=True
domain_1 =[0.80, 1]
domain_2 =[0, 0.80]
# iterate over all subcategories
for k, sub in enumerate(sub_categories):
# count entries of each subcategory that belongs to provided main category
counts = df.loc[(df[main_column].isin(main_categories)) & (
df[sub_column] == sub)][main_column].value_counts().sort_index()
for main in main_categories:
if main not in counts.index:
print(f"Column {main} not found")
counts.at[main] = 0
x_ = counts.index
## percentage options ##
if percentage == True:
if percentage_relative_to == "sub_category":
y_ = 100 * counts.values / len(df.loc[df[sub_column] == sub])
elif percentage_relative_to == "total":
y_ = 100 * counts.values / len(df)
elif percentage_relative_to == "main_category":
denominator = df.loc[df[main_column].isin(
main_categories)][main_column].value_counts().sort_index()
for main in main_categories:
if main not in denominator.index:
denominator.at[main] = 0
y_ = 100 * counts.values / denominator.values
text = [str(count) + ", " + str(round(y, 2)) + " %" for count, y in zip(counts, y_)]
else:
y_ = counts.values
            text = [str(x) + ": " + str(y) for x, y in zip(x_, y_)]
# sorting options
if sort_values == "counts":
x_y_ = list(zip(x_, y_))
x_y_sorted = sorted(x_y_, key=lambda item: item[1], reverse=True)
x_ = [item[0] for item in x_y_sorted]
y_ = [item[1] for item in x_y_sorted]
elif sort_values == "initial_sort":
values_dict = OrderedDict(zip(x_, y_))
y_ = np.array([values_dict.get(key) for key in main_categories])
x_ = main_categories
# for every subcategory make a bar chart
bar = go.Bar(
y=np.round(y_, 2),
x=x_,
marker=dict(color=colors[k], opacity=0.7),
name=sub,
text=text,
hoverinfo="x + text",
showlegend=showlegend
)
if show_box:
# if show_box is True, make a subplots 2x1
if k == 0:
fig = make_subplots(rows=2, cols=1)
box = go.Box(x=df.loc[df[sub_column] == sub][main_column],
marker=dict(color=colors[k]),
boxpoints=points,
notched=notched,
boxmean=mean,
showlegend=False,
)
# add box and bar plots to the subplots
fig.add_trace(box, row=1, col=1)
fig.add_trace(bar, row=2, col=1)
fig.layout["xaxis"].update(
axis_layout(showticklabels=False,
range_=[main_categories[0],
main_categories[-1]],
)
)
# set box-bar ratio to 0.8 : 0.2
fig.layout["yaxis"].update(
axis_layout(showticklabels=False),
domain=domain_1)
fig.layout["yaxis2"].update(domain=domain_2)
else:
# if show_box is False, make a single Figure and add bar chart
if k == 0:
fig = go.Figure()
fig.add_trace(bar)
fig.update_layout(legend=legend_font)
if transparent:
fig.update_layout(paper_bgcolor="rgba(0,0,0,0)", plot_bgcolor="rgba(0,0,0,0)")
if "trick_column" in df.columns:
df.drop(columns=["trick_column"], inplace=True)
return fig
def plot_heatmap(df_corr, show_annotations=True, colorscale="brwnyl", plot_triangle = False,
showscale=True, text_color = "#000000", text_size = 14, xgap=1, ygap=1):
trace=go.Heatmap(
colorscale=colorscale,
hoverinfo="text",
xgap=xgap,
ygap=ygap,
showscale=showscale
)
if plot_triangle:
corr_triangle = np.array([[None for k in range(df_corr.shape[1]-1)] for j in range(df_corr.shape[0]-1)])
for k, vals in enumerate(df_corr[1:].values):
corr_triangle[k][:k+1] = np.round(vals[:k+1], 2)
trace.update(z = corr_triangle[::-1], x = df_corr.index[:-1],
y = df_corr.index[1:][::-1], text = corr_triangle[::-1])
fig = go.Figure(data=[trace])
if show_annotations:
annotations = []
for k, y in enumerate(df_corr.index):
for x in df_corr.index[:k]:
anot = go.layout.Annotation(
x=x,
y=y,
xref="x",
yref="y",
text= "<b>" + str(round(df_corr[x][y], 2)) + "</b>",
showarrow=False,
font = {
"color": text_color,
"size": text_size,
"family": "Times New Roman"
}
)
annotations.append(anot)
else:
trace.update(z = df_corr.values[::-1], x = df_corr.index,
y = df_corr.index[::-1], text= np.round(df_corr.values[::-1], 2))
fig = go.Figure(data=[trace])
if show_annotations:
annotations = []
for k, y in enumerate(df_corr.index):
for x in df_corr.index:
anot = go.layout.Annotation(
x=x,
y=y,
xref="x",
yref="y",
text= "<b>" + str(round(df_corr[x][y], 2)) + "</b>",
showarrow=False,
font = {
"color": text_color,
"size": text_size,
"family": "Times New Roman"
}
)
annotations.append(anot)
fig.update_layout(
showlegend=False,
annotations=annotations
)
fig.update_layout(
plot_bgcolor="rgba(0,0,0,0)",
paper_bgcolor="rgba(0,0,0,0)"
)
return fig
def plot_table(df, title=False, cell_height=45, add_table_height=0, width=700, header_font="default", cell_font="default",
header_bg_color="#191970", header_align="center", cell_align="center",
cell_bg_colors=["rgba(158, 223, 249, 0.20)", "white"], split_words_on= True, line_color="lightgrey", line_width=2):
if header_font == "default":
header_font = dict(family="Times New Roman", color="white", size=15)
if cell_font == "default":
cell_font = dict(family="Times New Roman", color="#002266", size=14)
    if split_words_on:
        header_values = [val.replace("-", "<br>") for val in df.columns]
    else:
        header_values = list(df.columns)
table = go.Table(
header=dict(
values=header_values,
fill_color=header_bg_color,
line_color=line_color,
line_width=line_width,
height=cell_height,
font=header_font,
align=header_align
),
cells=dict(
values=[df[column] for column in df.columns],
font=cell_font,
height=cell_height,
align=cell_align,
fill_color=[cell_bg_colors * len(df)],
line_color=line_color,
line_width=line_width,
),
)
layout = go.Layout(
height=(len(df) + 1) * cell_height + len(df) + add_table_height,
width=width,
margin=dict(r=1, b=0, l=1, t=0),
plot_bgcolor="rgba(0,0,0,0)",
paper_bgcolor="rgba(0,0,0,0)",
)
if title:
layout.update(height=(len(df) + 1) * cell_height + len(df) + 30 + add_table_height,
title=title, margin=dict(r=1, b=0, l=1, t=35), width=width)
fig = go.Figure(data=[table], layout=layout)
return fig
def plot_distplot(df, column, hist=True, kde=True, gauss=True, show_box=True, points = False,
notched=True, show_mean=True, kde_resolution=128, colors="default", range_="auto",
n_bins=None, x_legend=0.85, y_legend=0.8, show_legend=True, legend="default", bargap=0.03):
    # values being fitted
    variable_values = df[column].values
    # generate the x-axis values
    x_values = np.linspace(min(variable_values), max(variable_values), kde_resolution)
    # mean and standard deviation of the fitted values
    mean, std = stats.norm.fit(variable_values)
    # Gaussian probability density
    gauss_prob_dens = stats.norm.pdf(sorted(variable_values), loc=mean, scale=std)
    # Kernel Density Estimate; a separate name keeps the boolean `kde`
    # parameter from being shadowed (otherwise `if kde:` below is always true)
    kde_estimator = stats.gaussian_kde(variable_values)
    kde_values = kde_estimator(x_values)
if colors=="default":
colors = ["#191970", "#64b5f6", "#ef6c00", "#03adfc"]
traces = []
if show_box:
box = go.Box(
x=variable_values,
marker=dict(color=colors[3]),
boxpoints=points,
notched=notched,
boxmean=show_mean,
showlegend=False,
)
if hist:
hist_trace = go.Histogram(x=variable_values,
histnorm='probability density',
marker=dict(color=colors[0], opacity=0.7),
nbinsx=n_bins,
name="Histogram",
showlegend = show_legend
)
traces.append(hist_trace)
# KDE probability density
if kde:
kde_trace = go.Scatter(
x=x_values,
y=kde_values,
name="KDE PDF",
showlegend = show_legend
)
traces.append(kde_trace)
# Gaussian probability density
if gauss:
gauss_trace = go.Scatter(
x=sorted(variable_values),
y=gauss_prob_dens,
name="Gauss PDF",
line=dict(color="#FFA500"),
showlegend = show_legend
)
traces.append(gauss_trace)
if range_=="auto":
range_= [min(variable_values), max(variable_values)]
if show_box:
fig = make_subplots(rows=2, cols=1)
fig.add_trace(box, row=1, col=1)
for trace in traces:
fig.add_trace(trace, row=2, col=1)
fig.layout["xaxis2"].update(
axis_layout(
show_grid=False,
range_=range_,
ticks=""
)
)
fig.layout["yaxis2"].update(
axis_layout(ticks=""),
domain=[0, 0.75],
showexponent = 'last',
exponentformat = 'power'
)
fig.layout["xaxis"].update(
axis_layout(
title="",
ticks="",
showticklabels=False,
range_=range_,
show_grid=False,
)
)
fig.layout["yaxis"].update(axis_layout(title="", ticks="", showticklabels=False, show_grid=False),
domain=[0.78, 1])
else:
fig = go.Figure()
for trace in traces:
fig.add_trace(trace)
fig.layout["xaxis"].update(
axis_layout(
title="",
range_=range_,
show_grid=False
),
)
fig.layout["yaxis"].update(
axis_layout(
title="",
show_grid=True
),
showexponent = 'last',
exponentformat = 'power'
)
legend_font = {}
legend_font["x"] = x_legend
legend_font["y"] = y_legend
if legend == "default":
legend_font.update(dict(bgcolor="rgba(0,0,0,0)",
font=dict(size=16, family="Times New Roman")))
fig.update_layout(
legend=legend_font,
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
bargap=bargap
)
return fig
def count_by(df, main_category, sub_category, n_main, n_sub, add_percentage=False,):
main_counts = df[main_category].value_counts()
total_counts = OrderedDict()
for main in main_counts.index[:n_main]:
sub_counts = df.loc[df[main_category] ==
main][sub_category].value_counts()
subs = sub_counts.index[:n_sub]
percentage = 100 * sub_counts.values[:n_sub] / sum(sub_counts.values)
for sub, percent in zip(subs, percentage):
if main not in total_counts.keys():
if add_percentage:
total_counts.update(
{main: [sub + f"<br>{percent:.1f} %</br>"]})
else:
total_counts.update({main: [sub]})
else:
if add_percentage:
total_counts[main].append(
sub + f"<br>{percent:.1f} %</br>")
else:
total_counts[main].append(sub)
return pd.DataFrame(total_counts)
def plot_predictions(y_true, y_predict, num_sample, title, figsize= (10,5)):
sample_ind = np.random.randint(0, len(y_true), size = num_sample)
sample_true = y_true[sample_ind]
sample_predict = y_predict[sample_ind]
plt.figure(figsize=figsize)
plt.scatter(np.arange(num_sample), sample_true,
marker = 'o', s=30, edgecolors='b', facecolors='none', label = 'True values')
plt.scatter(np.arange(num_sample), sample_predict,
marker = 'x', c='red', s=20, label = 'Predicted values')
val = [i for i in range(num_sample) for j in range(2)]
s = []
for i in range(num_sample):
s.append(sample_true[i])
s.append(sample_predict[i])
for i in range(num_sample):
plt.plot(val[2*i:2*i+2], s[2*i:2*i+2], ':', c = 'green', alpha=0.5)
plt.legend(loc = 'best', fontsize=8)
plt.xticks(np.arange(0,num_sample))
plt.grid(alpha = 0.4)
plt.ylabel("Price", fontsize = 10)
plt.title(title, fontsize=12)
plt.show()
r2_ = r2_score(y_true, y_predict)
mse = mean_squared_error(y_true, y_predict)
mae = mean_absolute_error(y_true, y_predict)
print("Mean Root Squared Error: {0:.3f}.".format(mse**0.5))
print("Mean Absolute Error: {0:.1f}.".format(mae))
print("R Squared Metric: {0:.3f}.".format(r2_))
def removeBarButtons():
return dict(
displaylogo=False,
modeBarButtonsToRemove=["pan2d", "lasso2d", "select2d", "toggleSpikelines", "autoScale2d",
"hoverClosestCartesian", "hoverCompareCartesian"]
)
def to_year(year):
dt_year = datetime.strptime(str(int(year)), '%Y')
return dt_year
def update_dict(dictonary, update):
dictonary.update(update)
return dictonary
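
# Hedged demo (assumed toy data; not part of the original module):
if __name__ == "__main__":
    demo = pd.DataFrame({
        "city": ["A", "A", "B", "B", "B"],
        "kind": ["x", "y", "x", "x", "y"],
        "val": [1, 2, 3, 4, 5],
    })
    demo_fig = plot_hue(demo, main_bars="city", sub_bars="kind")
    demo_fig.update_layout(title=title_layout("Counts by city (demo)"))
    demo_fig.show()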
| UTF-8 | Python | false | false | 24,573 | py | 10 | plotly_factory-checkpoint.py | 5 | 0.509035 | 0.492186 | 0 | 736 | 32.383152 | 210 |
Josenyldo/Grafos | 807,453,877,068 | f82a4ed6797d00e9520af921f4980dc9cdc68b08 | a50925b1b8f243ce0dc76d94f3ece5b7724f3224 | /quarta_lista/grafo_adj_test.py | b4e5d9d163a72b897986221daf254ea87fd7a128 | []
| no_license | https://github.com/Josenyldo/Grafos | 8a6aae4e7a19dd5ba4c9e538f8300f951d669836 | bad789a95c9132dd9269a8cf64be346f37620e59 | refs/heads/master | 2020-04-29T13:43:55.121149 | 2019-06-20T00:19:14 | 2019-06-20T00:19:14 | 176,176,694 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from quarta_lista.grafo_adj import *
class TestGrafo(unittest.TestCase):
def setUp(self):
        # Graph of Paraíba
self.g_p = Grafo([], [])
for i in ['A','B','C','D']:
self.g_p.adiciona_vertice(i)
for i in ['A-B','B-D','D-C','C-B','C-D']:
self.g_p.adiciona_aresta(i)
self.g_p2 = Grafo([], [])
for i in ['J', 'C', 'E', 'P', 'M']:
self.g_p2.adiciona_vertice(i)
for i in ['M-P', 'P-E', 'P-C', 'J-E', 'M-J']:
self.g_p2.adiciona_aresta(i)
self.g_c = Grafo([], [])
for i in ['H', 'B', 'C','D','E','F']:
self.g_c.adiciona_vertice(i)
for i in ['B-H','C-D','F-E']:
self.g_c.adiciona_aresta(i)
def test_whashall(self):
self.assertEqual(self.g_p.whashall(),[[0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]])
self.assertEqual(self.g_p2.whashall(), [[0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 1, 0, 0], [1, 1, 1, 1, 0]]);
self.assertEqual(self.g_c.whashall(), [[0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0]]) | UTF-8 | Python | false | false | 1,197 | py | 9 | grafo_adj_test.py | 7 | 0.438963 | 0.371237 | 0 | 36 | 32.25 | 167 |
houssemFat/MeeM-Dev | 12,008,728,582,537 | 697fa89448bc787dda903585a22bacd0f5e54ad9 | d71d9ff4787be6375a40618a7d9b21b578e89fe0 | /teacher/apps/collaboration/models.py | 7c088f8745df59b3e0bef4ecf4f06a2f61dbc323 | [
"MIT"
]
| permissive | https://github.com/houssemFat/MeeM-Dev | 974f1fadc118d63d9afd24be5f5c534a7a839403 | f17310cce8e7747363f4911c28f60c1339a3431d | refs/heads/master | 2021-01-13T01:42:23.425197 | 2016-11-03T12:37:02 | 2016-11-03T12:37:02 | 24,350,362 | 1 | 1 | null | false | 2016-11-03T12:37:05 | 2014-09-23T00:17:52 | 2015-06-22T12:47:24 | 2016-11-03T12:37:03 | 11,063 | 2 | 1 | 0 | CSS | null | null | from django.db import models
from core.apps.accounts.models import User
from datetime import datetime
class Collaborator(models.Model):
source = models.ForeignKey(User, db_column="source_id", related_name="my_collaborators")
user = models.ForeignKey(User, db_column="user_id", related_name="my_collaborators_with_others")
join_at = models.DateTimeField(default=datetime.now)
class Meta:
db_table = 'teacher_collaboration_collaborator'
unique_together = ['source', 'user']
class CollaboratorInvitation(models.Model):
fromuser = models.ForeignKey(User, db_column="source_id", related_name="sent_invitations")
usermail = models.EmailField()
sent_at = models.DateTimeField(default=datetime.now)
accepted = models.BooleanField(default=False)
class Meta:
db_table = 'teacher_collaboration_collaborator_invitation' | UTF-8 | Python | false | false | 870 | py | 570 | models.py | 385 | 0.737931 | 0.737931 | 0 | 20 | 42.55 | 100 |
selivanov-as/ml-text-highlights | 2,585,570,356,401 | a1f0bfe9162e9a244a09e4e2d352e6026d3b25a8 | 008f5d199fdcf8ae247783da6c14d07fecc4f51a | /server/samples_raw.py | 4676c04ccd49b349381aebc0574ba145bf8642c9 | []
| no_license | https://github.com/selivanov-as/ml-text-highlights | a07944bc8bb6cb63c6494d0a84f5151c81a0f71a | c1f47f60a8c873303bc3e0c9aa7624061dc4ef15 | refs/heads/master | 2022-12-10T03:48:19.501782 | 2019-05-20T13:48:22 | 2019-05-20T13:48:22 | 152,454,802 | 0 | 2 | null | false | 2022-12-08T02:28:51 | 2018-10-10T16:27:16 | 2019-05-20T13:49:40 | 2022-12-08T02:28:51 | 401,592 | 0 | 0 | 32 | Jupyter Notebook | false | false | samples_raw = [
{
"words": ["Здравствуйте", "Kirill", "Bulgakov", "Мы", "обратили", "внимание", "что", "Вы", "еще", "@не",
"@активировали",
"Вашу", "Предоплаченную", "@Дебетовую", "@Карту", "@Payoneer", "@MasterCard®", "и", "хотели", "бы",
"убедиться", "в",
"том", "что", "Вы", "@получили", "@ее", "Если", "Вы", "еще", "@не", "@получили", "Вашу", "@карту",
"пожалуйста",
"@посетите", "@Центр", "@Поддержки", "для", "получения", "дополнительной", "информации", "Если", "Вы",
"уже",
"@получили", "Вашу", "@карту", "Вы", "@можете", "@войти", "@в", "Ваш", "@аккаунт", "@Payoneer",
"@активировать", "@ее", "@и",
"@начать", "@получать", "@выплаты", "Для", "получения", "дополнительной", "информации",
"относительно",
"доставки",
"карт", "пожалуйста", "@прочитайте", "@эту", "@статью", "@в", "@блоге", "@Payoneer", "Спасибо",
"Коллектив",
"Payoneer"]
},
{
"words": ["@«М", "@Видео»", "@по", "@ошибке", "@показал", "@фото", "@и", "@характеристики", "@нового",
"@«Яндекс",
"@Телефона»",
"16", "29", "02", "Декабря", "2018", "Москва", "Интернет", "магазин",
"@«М", "@Видео»", "@по", "@ошибке", "@выложил", "в", "сеть", "@фото", "@и", "@характеристики",
"@«Яндекс",
"@Телефона»", "@релиз", "@которого", "@был", "@назначен", "@только",
"@на", "@5", "@декабря", "", "", "Сообщается", "", "что", "на", "сайте",
"интернет", "магазина", "была", "по", "ошибке", "@опубликована", "@информация", "@о", "@том",
"@как", "именно", "@будет", "@выглядеть", "@новинка", "", "причем", "ее", "официальная",
"презентация",
"должна", "состояться", "только", "декабря", "@Сведения", "спустя", "некоторое", "время", "@были",
"@в",
"@срочном", "@порядке", "@удалены", "однако", "некоторым", "специалистам", "удалось", "успеть",
"сохранить", "сведения", "и", "медиафайлы", "Согласно", "обнародованным", "данным", "@корпус",
"нового", "@смартфона", "@будет", "@изготовлен", "@из", "@пластика", "@и", "@металла", "@а",
"@«сердцем»", "«Яндекс", "Телефона»", "@станет", "@Snapdragon", "@630", "@Разрешение", "@основной",
"@камеры", "@составит", "@16", "@Мп", "@а", "@фронтальной", "@5", "@Мп", "@Гаджет", "@будет",
"@оснащен",
"@чипом", "@NFC", "@и", "@зарядом", "@батареи", "@на", "@3060", "@мАЧ", "Также", "стала", "известна",
"@стоимость", "@устройства", "которая", "@составит", "@17", "@990", "@рублей", "Отметим", "что",
"@ранее",
"@в", "@сети", "@уже", "@появлялись", "@характеристики", "«@Яндекс", "@Телефона»", "@которые",
"@полностью",
"@совпадают", "@с", "@указанными", "@на", "@сайте", "@«М", "@Видео»",
]
},
{
"words": ["Добрый", "день", "@Помогают", "@ли", "@вам", "@онлайн", "@курсы", "@от",
"@Яндекса", "@и", "@МФТИ", "@добиваться", "@своих", "@целей", "@Расскажите",
"@нам", "@об", "э@том", "@заполнив", "@опрос", "@по", "@ссылке", "@Ваш", "@ответ",
"@поможет", "@улучшить", "@качество", "@программ", "@и", "@материалов", "которые",
"мы", "создаем", "С", "уважением", "Команда", "создателей", "курса"]
}
]
| UTF-8 | Python | false | false | 5,202 | py | 103 | samples_raw.py | 13 | 0.424161 | 0.417181 | 0 | 55 | 66.727273 | 120 |
Braxton22/https---github.com-Braxton22-PizzeriaProject | 17,927,193,525,737 | b6d0eb6d72c455b1a192001760318cd87d6bbaf8 | ad9ffc134f7c7b224b0a3f37346163c3a23f4cba | /Pizzeria/urls.py | 17a3df1a18ad18cc8181015f2dae8780876d9f95 | []
| no_license | https://github.com/Braxton22/https---github.com-Braxton22-PizzeriaProject | 7e92a111a94593cca6e0798c102fd0e2da30b81d | e8cb47ac3cadd99b131cbb812d17105e05fd3f49 | refs/heads/master | 2023-04-21T18:57:34.803139 | 2021-05-01T15:15:04 | 2021-05-01T15:15:04 | 363,440,237 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #need the path function to map a url to a view
from django.urls import path
#dot tells python to import views.py module from the same directory as the current urls.py module
from . import views
#the variable app_name helps Django distinguish this urls.py file from the files of the same name in other apps within the project
app_name = 'Pizzeria'
#the variable urlpatterns in this moudle is a list of individual pages that can be requested form the Pizzeria app
urlpatterns = [
#the empty string matches the base url (nothing in there), the second argument calls in views, and t he third provides index for this url pattern to refer to it later
path('',views.index,name='index'),
path('pizzas',views.pizzas, name='pizzas'),
path('pizzas/<int:pizza_id>/',views.pizza,name='pizza'),
path('new_comment/<int:pizza_id>/',views.new_comment,name='new_comment')
] | UTF-8 | Python | false | false | 879 | py | 11 | urls.py | 7 | 0.746303 | 0.746303 | 0 | 17 | 50.764706 | 170 |
mcnulty1/Python-Team-Project | 16,518,444,245,422 | 1cf3dbcdfe61b0b819ea2389c6389574793597c0 | 07d8ed7d833dae63cd15f058feaa05ca0b0c6316 | /Teamproject_grayscale.py | 624766a6ff6d0639b88e3f2cee56c60af06f1e26 | []
| no_license | https://github.com/mcnulty1/Python-Team-Project | e18146df999b610abcbd185b01bef2e15f7857ec | 8d54f3eea01278d89c7e2615f638e09d9a154a83 | refs/heads/main | 2022-12-30T03:10:47.845697 | 2020-10-06T16:47:24 | 2020-10-06T16:47:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Grayscale Function"""
| UTF-8 | Python | false | false | 26 | py | 3 | Teamproject_grayscale.py | 3 | 0.653846 | 0.653846 | 0 | 1 | 24 | 24 |
seekingup/LeetCode-Python | 15,848,429,327,348 | db7258c06057ea446dbaffa2404c6198fc8551bc | 49df576fb57b5b29b7aa2d300891c9395b96ecbf | /120_minimumTotal.py | 6493b9cdc37dc776c7c84e62844d0acd2124bd61 | []
| no_license | https://github.com/seekingup/LeetCode-Python | eed4590e883106bf769727f7b582f8dad89bb8dc | 6d3274be758c9d8a4d61aa51eb328a46d319eadf | refs/heads/master | 2020-03-21T10:59:22.495077 | 2019-06-07T09:52:10 | 2019-06-07T09:52:10 | 138,482,626 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
n = len(triangle)
if n == 0:
return 0
dp = [0 for i in range(n)]
dp_last = [0 for i in range(n)]
dp_last[0] = triangle[0][0]
for i in range(1, n):
dp[0] = dp_last[0] + triangle[i][0]
for j in range(1, i):
dp[j] = min(dp_last[j - 1], dp_last[j]) + triangle[i][j]
dp[i] = dp_last[i - 1] + triangle[i][i]
dp_last, dp = dp, dp_last
return min(dp_last)
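# Note on the loop above: the triangle is processed row by row, keeping only
# the previous row's best path sums in two rolling arrays, so extra space is
# O(n) instead of an O(n^2) table; dp[j] is the cheapest path to column j.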
if __name__ == '__main__':
# inputs
triangle = [
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
triangle = [[2]]
triangle = [[-1],[-2,-3], [-4, -5, -6]][:2]
# triangle = [[2], [3, 4], [6, 5, 7]]
print('-' * 30)
res = Solution().minimumTotal(triangle)
print(res) | UTF-8 | Python | false | false | 975 | py | 266 | 120_minimumTotal.py | 264 | 0.408205 | 0.367179 | 0 | 35 | 25.914286 | 72 |
erllan/-course | 15,101,105,024,412 | 0d47503d5e1c7b5baaa72cce239c05750cbc641f | 72adbcd7bfb34a7af6da79c75fe75dc94289b550 | /rest/RestTutorial/migrations/0004_course_logo.py | a11537806962ef77af976a380c1e14a0ce95f42d | []
| no_license | https://github.com/erllan/-course | a42957da77ccdaa256fa39204096e8282f010575 | 6267857e28f0210634c16074720c84372817e195 | refs/heads/master | 2023-01-06T13:23:46.634221 | 2020-10-27T13:22:38 | 2020-10-27T13:22:38 | 305,687,614 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.2 on 2020-10-21 09:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('RestTutorial', '0003_auto_20201021_1501'),
]
operations = [
migrations.AddField(
model_name='course',
name='logo',
field=models.ImageField(null=True, upload_to='course_logo', verbose_name='logo'),
),
]
| UTF-8 | Python | false | false | 430 | py | 10 | 0004_course_logo.py | 9 | 0.6 | 0.527907 | 0 | 18 | 22.888889 | 93 |
MysteriousSonOfGod/python-3 | 4,389,456,601,471 | c1a6b678d006f5267cb6920ec947edcedb3e1d88 | 44e8334e1b17fda7f60d9760f59868a9227e2ab0 | /ML/chap01/ch02_0.py | c920dbddea81be910b3a0defc94134e90432823f | []
| no_license | https://github.com/MysteriousSonOfGod/python-3 | 47c2aa69a84ba78876c74bc6f2e7e6f3093df1e2 | a303a5284c40f3cb96a8082a1f5ed80773b66336 | refs/heads/master | 2023-02-16T18:21:46.153388 | 2021-01-13T10:55:14 | 2021-01-13T10:55:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
import mglearn
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import images.image
# 2. The forge dataset with two features is a synthetic binary classification dataset
X, y = mglearn.datasets.make_forge()
print("X.shape: {}".format(X.shape))
print("y.shape: {}".format(y.shape))
print("X type: {}".format(type(X)))
print("y type: {}".format(type(y)))
print(X[:, 0], X[:, 1], y)
# Draw the scatter plot: 2 features and 1 target (2 classes)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
plt.legend(["class 0", "class 1"], loc=4)
plt.xlabel("first feature")
plt.ylabel("second feature")
plt.title("Forge Scatter Plot")
images.image.save_fig("2.Forge_Scatter")
plt.show()
# training set / test set split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
print("X_train size: {}".format(X_train.shape))
print("y_train size: {}".format(y_train.shape))
print("X_test size: {}".format(X_test.shape))
print("y_test size: {}".format(y_test.shape))
# Draw the scatter plot for the training data: 2 features and 1 target (2 classes)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.legend(["class 0", "class 1"], loc=4)
plt.xlabel("first feature")
plt.ylabel("second feature")
plt.title("Forge Scatter Plot")
images.image.save_fig("2.Forge_Scatter_by_X_train")
plt.show()
# Compare scatter plots - 1: full data 2: X_train 3: X_test
fig, axes = plt.subplots(1, 3, figsize=(15, 6))
for X, y, title, ax in zip([X, X_train, X_test], [y, y_train, y_test], ['full data', 'X_train', 'X_test'], axes):
    mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
    ax.set_title("{}".format(title))
    ax.set_xlabel("feature 0")
    ax.set_ylabel("feature 1")
axes[0].legend(loc=3)
images.image.save_fig("2.Forge_scatter_compare")
plt.show()
| UTF-8 | Python | false | false | 2,014 | py | 264 | ch02_0.py | 257 | 0.655212 | 0.634021 | 0 | 55 | 30.690909 | 104 |
markcornwell/pychess | 5,119,601,035,316 | e8ab313c1ade708c279c7df498343b77fc5660e2 | 7b221c6963f211c82030296f6a65aec1568574fc | /mediate.py | 2f4562decc5baca62123379f3336cd48b8458ab3 | []
| no_license | https://github.com/markcornwell/pychess | 3ffd660deedb918b3d407d4938cbb614f644c1b9 | b4f3a406ace2378c52a1057ebdd4a2bea7285c65 | refs/heads/master | 2021-03-27T11:10:15.395579 | 2015-10-31T20:08:35 | 2015-10-31T20:08:35 | 45,316,756 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python -Ou
# $Id: mediate.py 7 2010-01-13 12:16:47Z mark $
#
# These are functions to mediate between the chess routines and the outside
# interface, e.g. a command line or a GUI
#
# Works with XBoard - see engine-intf.html
# Do not use WinBoard. Use XBoard under cygwin's xwin
# Need to patch security flaw in XBoard
import os, sys, random
import board, legal, move, algebraic, san
from square import *
from attack import blackAttacks,whiteAttacks
import time
import best
import alphabeta
#playing = False
engineColor = None # color engine plays
assert engineColor in [None,'white','black']
def stopClocks():
pass # TBD
feature_requested = { }
feature_accepted = { }
def put_featureXXX():
put('feature myname="Black Momba"')
put('feature usermove=1 san=1 draw=1 colors=0 sigint=0 sigterm=0 done=1')
def put_feature():
request('myname','\"Black Momba\"')
request('usermove','1')
request('san','1')
request('draw','1')
request('colors','0')
request('sigint','0')
request('sigterm','0')
request('time','0')
request('reuse','1')
#request('done','1') # must be last feature
def request(key,val):
feature_requested[key]=val
put('feature %s=%s' % (key,val))
def accepted(cmd):
key = cmd.split(" ")[1]
feature_accepted[key] = feature_requested[key]
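
# Illustrative protover-2 handshake (transcript sketch, per engine-intf.html):
#   xboard -> engine: protover 2
#   engine -> xboard: feature myname="Black Momba" usermove=1 ... done=1
#   xboard -> engine: accepted usermove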
def get(strip=True):
#log("readline...")
s = sys.stdin.readline()
if engineColor==None:
log("?????(%s) got %s\n" % (os.getpid(),s))
else:
log("%s(%s) got %s\n" % (engineColor,os.getpid(),s))
if strip:
return s.replace("\n","")
else:
return s
def put(s,newline=True):
if newline:
ss = "%s\n" % s
else:
ss = s
if engineColor==None:
log("?????(%s) put %s" % (os.getpid(),ss))
else:
log("%s(%s) put %s" % (engineColor,os.getpid(),ss))
sys.stdout.write(ss)
logf = open("mediate.log","w")
logf.write("log begun\n")
logf.flush()
def log(s):
global logf
logf.write(s)
logf.flush()
def starts(cmd,s):
return cmd[0:len(s)]==s
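# e.g. starts("protover 2","protover") evaluates to True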
def xboard():
global engineColor
cmd = "dummy"
while cmd!="quit":
#log("cmd %s\n" % cmd)
if cmd=="xboard":
put("")
elif starts(cmd,"protover"):
protover(cmd)
elif starts(cmd,"accepted"):
accepted(cmd)
elif starts(cmd,"new"):
new()
elif starts(cmd,"variant"):
put_error("variant","not implemented")
elif starts(cmd,"force"):
force()
elif starts(cmd,"go"):
go()
elif starts(cmd,"random"):
pass
elif starts(cmd,"computer"):
pass
elif starts(cmd,"level"):
pass
elif starts(cmd,"hard"):
pass
elif starts(cmd,"time"):
pass
elif starts(cmd,"otim"):
pass
elif starts(cmd,"playother"):
put_error("playother","not implemented")
elif starts(cmd,"usermove"):
usermove(cmd)
elif starts(cmd,"?"):
put_error("?","not implemented")
elif starts(cmd,"ping"):
put_pong(cmd)
elif starts(cmd,"draw"):
put("offer draw")
elif starts(cmd,"result"):
#put_error("result","not implemented")
engineColor = None
elif starts(cmd,"setboard"):
put_error("setboard","not implemented")
elif cmd=="dummy":
pass
else:
put_error(cmd,"unrecognized command")
cmd = get()
def new():
# Reset the board to the standard chess starting position. Set White
# on move. Leave force mode and set the engine to play Black.
# Associate the engine's clock with Black and the opponent's clock
# with White. Reset clocks and time controls to the start of a new
# game. Stop clocks. Do not ponder on this move, even if pondering is
# on. Remove any search depth limit previously set by the sd command.
global engineColor
stopClocks()
board.reset()
engineColor = 'black'
def force():
#Set the engine to play neither color ("force mode"). Stop clocks.
# The engine should check that moves received in force mode are
# legal and made in the proper turn, but should not think, ponder, or
# make moves of its own.
global engineColor
engineColor = None
stopClocks()
def go():
#Leave force mode and set the engine to play the color that is on
# move. Associate the engine's clock with the color that is
# on move, the opponent's clock with the color that is not on move.
# Start the engine's clock. Start thinking and eventually make a move.
global engineColor
assert engineColor == None
engineColor = board.colorToMove()
assert engineColor in ['white','black']
time.sleep(1)
reply()
def usermove(cmd):
# By default, moves are sent to the engine without a command name;
# the notation is just sent as a line by itself. Beginning in protocol
# version 2, you can use the feature command to cause the command
# name "usermove" to be sent before the move. Example: "usermove
# e2e4".
assert engineColor in ['black','white',None]
if board.colorToMove()==engineColor:
put_error(cmd,"out of turn move ignored")
else:
mvstring = cmd.split(" ")[1]
opponent_move(mvstring)
def opponent_move(s):
assert engineColor in ['white','black',None]
assert board.colorToMove()!=engineColor
mvs = legal.moves()
found = algebraic.find(s,mvs)
if len(found)==0:
put_illegalmove(s,"not found")
elif len(found)==1:
assert board.colorToMove()!=engineColor
assert move.color(found[0])!=engineColor
move.make(found[0])
if board.colorToMove()==engineColor:
assert board.colorToMove()==engineColor
reply()
elif len(found)>1:
put_illegalmove(s,"ambiguous")
def reply():
#if not board.colorToMove()==engineColor:
# return None
assert board.colorToMove()==engineColor
mvs = legal.moves()
#time.sleep(0.1)
if len(mvs)>0:
if len(board.history)>300:
resign()
else:
mv,score,cnt,sec = alphabeta.best(mvs)
s = san.pr(mv,mvs)
put_move(s)
move.make(mv)
else:
assert len(mvs)==0
report_result()
def resign():
global engineColor
if engineColor=='white':
put_result("0-1","white resigns")
else:
put_result("1-0","black resigns")
    engineColor = None
def report_result():
assert len(legal.moves())==0
if board.whiteToMove():
if blackAttacks(board.whiteKingAt):
put_result("1-0","black mates")
else:
put_result("1/2-1/2","stalemate")
else:
assert board.blackToMove()
if whiteAttacks(board.blackKingAt):
put_result("0-1","white mates")
else:
put_result("1/2-1/2","stalemate")
def protover(cmd):
global protocolVersion
protocolVersion = cmd.split(" ")[1]
put_feature()
# These all talk ot winboard
def put_pong(cmd):
number = cmd.split(" ")[1]
put("pong %s" % number)
def put_illegalmove(mvstring,reason=""):
if reason=="":
put("Illegal move: %s" % mvstring)
else:
put("Illegal move (%s): %s" % (reason,mvstring))
def put_error(cmd,reason):
put("Error (%s): %s" % (reason,cmd))
def put_move(mvstring):
"tell winboard what move the engine made"
put("move %s" % mvstring)
def put_result(res,comment):
if res in ["0-1","1-0","1/2-1/2"]:
put("%s { %s }" % (res,comment))
else:
put_error("res","internal error in engine")
xboard()
| UTF-8 | Python | false | false | 8,098 | py | 31 | mediate.py | 24 | 0.568659 | 0.55915 | 0 | 273 | 27.663004 | 77 |
hkaraoguz/quart_swagger | 2,516,850,858,616 | adbe19614808c8d412ddb34ae23b9d8e0ea54aa8 | 9d489e4f0c3aa5c7b970e7a9e842535300fb1591 | /quart_swagger_blueprint.py | 058832ceebad60de200ec3478cdb28ac469536af | []
| no_license | https://github.com/hkaraoguz/quart_swagger | 3b480d79859fe096193150e7c7c5f9e1e8df4493 | b0a1f79ac225ff9f32c56b63d3e26e84d25c8d87 | refs/heads/master | 2020-06-04T07:36:09.424109 | 2019-06-14T11:02:41 | 2019-06-14T11:02:41 | 191,927,345 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import json
from quart import Blueprint, send_from_directory
swagger_ui = Blueprint("swagger_ui",
__name__,
static_folder='swaggerui')
@swagger_ui.route('/swaggerui/')
@swagger_ui.route('/swaggerui/<path:path>')
def show(path=None):
if path is None:
return send_from_directory(
swagger_ui._static_folder,
"index.html"
)
return send_from_directory(
swagger_ui._static_folder,
path
)
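# typical registration with a Quart app (illustrative sketch):
#   from quart import Quart
#   app = Quart(__name__)
#   app.register_blueprint(swagger_ui)
# the bundled UI is then served from /swaggerui/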
| UTF-8 | Python | false | false | 553 | py | 4 | quart_swagger_blueprint.py | 2 | 0.529837 | 0.529837 | 0 | 21 | 25.333333 | 51 |
venom1270/essay-grading | 15,865,609,211,841 | a48f8c5b990766420634c498efac124371184f44 | b8c90f28396cf985a0fc53d3585de2e40741226b | /orangecontrib/essaygrading/modules/Content.py | 734a8a6dcf8dee16589f6b40e42cd34aa3904eaa | []
| no_license | https://github.com/venom1270/essay-grading | 0f7d4071cf76148b16aba5588e93eb9eb262fe3c | 891e70beba1a3eb750ec78b5ba696070cff8bf3a | refs/heads/master | 2021-07-12T08:31:22.798423 | 2020-09-21T14:15:15 | 2020-09-21T14:15:15 | 199,926,873 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import string
import language_check
import collections
import spacy
from orangecontrib.essaygrading.utils import globals
from orangecontrib.essaygrading.modules.BaseModule import BaseModule
from orangecontrib.essaygrading.utils.lemmatizer import lemmatizeTokens
from spellchecker import SpellChecker
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
try:
from flair.embeddings import WordEmbeddings, FlairEmbeddings, DocumentPoolEmbeddings, Sentence
flair_available = True
except ModuleNotFoundError:
print("Flair module not found!")
flair_available = False
name = "Content"
class Content(BaseModule):
name = "Content"
def __init__(self, corpus, corpus_sentences, grades, source_text=None, graded_corpus=None,
word_embeddings=globals.EMBEDDING_TFIDF):
"""
Overrides parent __init__ and calls _load().
Special: some attributes require comparison with an already graded essay ("training set"). This is "graded_corpus".
If None, those attributes will be skipped.
:param corpus: Tokenized essay Corpus.
:param corpus_sentences: Tokenized (by sentence) essay Corpus.
:param grades: Array of essay grades (ints)
:param source_text: Corpus of source texts (optional)
:param graded_corpus: Corpus with grades ("training set").
:param word_embeddings: specify which word embeddings to use (TF-IDF or GloVe).
"""
self._load(corpus, corpus_sentences, grades, source_text=source_text, graded_corpus=graded_corpus,
word_embeddings=word_embeddings)
# graded corpus is used when comparing "Score point level for maximum cosine similarity over all score points"
# and "max cosine sim"
# if empty, just compare with "leave one out" method
def _load(self, corpus, corpus_sentences, grades, source_text=None, graded_corpus=None,
word_embeddings=globals.EMBEDDING_TFIDF):
"""
Calls parent _load() and sets additional parameters. Initializes spellchecker.
:param corpus: Tokenized essay Corpus.
:param corpus_sentences: Tokenized (by sentence) essay Corpus.
:param grades: Array of essay grades
:param source_text: Corpus of source texts (optional)
:param graded_corpus: Corpus with grades ("training set").
:param word_embeddings: specify which word embeddings to use (TF-IDF or GloVe).
"""
if corpus is not None and corpus_sentences is not None:
super()._load(corpus, corpus_sentences)
self.source_text = source_text
self.attributes = []
self.pos_tag_counter = [collections.Counter([x for x in doc]) for doc in self.corpus.pos_tags]
        # remove punctuation tags from the counts
self.pos_tag_counter = [{key: value for key, value in doc.items()
if key not in string.punctuation and key != "''"} for doc in self.pos_tag_counter]
self.spellchecker = SpellChecker()
self.lang_check = None
self.lang_check_errors = None
self.tfidf_matrix = None
if grades is None:
self.essay_scores = None
else:
self.essay_scores = np.array(grades)
self.cosine = None
self.word_embeddings = word_embeddings
self.graded_corpus = graded_corpus
def _cosine_preparation(self):
"""
Calculates word embeddings and cosine similarities between essays.
Results are stored in internal variables.
"""
spacy_embeddings = None
flair_embeddings = None
if not flair_available and self.word_embeddings == globals.EMBEDDING_GLOVE_FLAIR:
print("Flair not available, switching to SpaCy embeddings")
self.word_embeddings = globals.EMBEDDING_GLOVE_SPACY
if self.word_embeddings == globals.EMBEDDING_GLOVE_SPACY:
spacy_embeddings = spacy.load("en_vectors_web_lg")
print("GloVe (SpaCy) vectors loaded!")
elif self.word_embeddings == globals.EMBEDDING_GLOVE_FLAIR:
flair_embeddings = DocumentPoolEmbeddings([WordEmbeddings("glove")])
print("GloVe (Flair) vectors loaded!")
tfidf_vectorizer = TfidfVectorizer(max_features=None, stop_words="english")
docs = lemmatizeTokens(self.corpus, join=True)
# append source/prompt text
if self.source_text is not None:
# Preprocess source text and add to documents for similarity calculation
spacy_preprocess = spacy.load("en_vectors_web_lg")
self.source_text = spacy_preprocess(self.source_text.lower())
docs.append(" ".join([token.lemma_ for token in self.source_text]))
# docs.append((lemmatizeTokens(self.source_text, join=True)[0]))
# print(docs)
if self.word_embeddings == globals.EMBEDDING_TFIDF:
self.tfidf_matrix = tfidf_vectorizer.fit_transform(docs)
elif self.word_embeddings == globals.EMBEDDING_GLOVE_SPACY:
self.tfidf_matrix = []
for doc in docs:
self.tfidf_matrix.append(spacy_embeddings(doc).vector)
self.tfidf_matrix = np.array(self.tfidf_matrix)
else:
# FLAIR
self.tfidf_matrix = []
for doc in docs:
document = Sentence(doc)
flair_embeddings.embed(document)
self.tfidf_matrix.append(np.array(document.get_embedding().detach().numpy()))
self.tfidf_matrix = np.array(self.tfidf_matrix)
# self.tfidf_matrix = self.tfidf_matrix * self.tfidf_matrix.T
print(self.tfidf_matrix)
self.cosine = cosine_similarity(self.tfidf_matrix)
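        # self.cosine is an (n_docs x n_docs) matrix: entry [i][j] is the cosine
        # similarity between documents i and j (the source text, if supplied,
        # occupies the last row/column)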
print(self.cosine)
# print(self.cosine[-1][:-1])
# Now calculate cosine for ungraded vs graded essays
if self.graded_corpus:
docs = lemmatizeTokens(self.corpus, join=True)
docs += lemmatizeTokens(self.graded_corpus, join=True)
tfidf = tfidf_vectorizer.fit_transform(docs)
# Only take upper right quarter of the similarity matrix, because we are interested in graded vs. ungraded
# rows are ungraded, cols are graded
print("ALL")
print(cosine_similarity(tfidf))
self.cosine_graded = cosine_similarity(tfidf)[:len(self.corpus), len(self.corpus):]
# self.essay_scores = list(np.floor(self.graded_corpus.X[:, 5] / 2))
# self.essay_scores = list(np.floor(self.essay_scores / 2))
self.essay_scores = list(np.floor(self.essay_scores))
print("COSINE GRADED")
print(self.cosine_graded)
elif self.essay_scores is not None:
# self.essay_scores = list(np.floor(self.essay_scores / 2))
self.essay_scores = list(np.floor(self.essay_scores))
print("Cosine preparation finished")
def calculate_all(self, selected_attributes, attribute_dictionary, callback=None, proportions=None, i=None):
"""
Calculates all attributes in this module.
:param selected_attributes: Object with attributes to calculate (boolean flags). If None, calculate all.
:param attribute_dictionary: Attribute dictionary which will be filled with calculated attributes.
:param callback: Callback update function for progressbar.
:param proportions: List of possible progressbar values.
:param i: Index of current progressbar value.
:return: i (index of progressbar value).
"""
# Useful: https://community.languagetool.org/rule/list?lang=en
# Load language-check library if necessary (takes a while)
if selected_attributes is None or selected_attributes.cbNumberOfPunctuationErrors or selected_attributes.cbNumberOfCapitalizationErrors \
or selected_attributes.cbNumberOfGrammarErrors:
print("Language Check loading")
self.lang_check = language_check.LanguageTool("en-US")
print("Language Check LOADED")
self.lang_check_errors = [self.lang_check.check(doc) for doc in self.corpus.documents]
print("Language Check END")
if selected_attributes is None or selected_attributes.cbNumberOfSpellcheckingErrors:
errors = self.calculate_num_spellcheck_errors()
print("Number of spellcecking errors: ", errors)
attribute_dictionary["numberOfSpellcheckingErrors"] = errors
# i = self._update_progressbar(callback, proportions, i)
if selected_attributes is None or selected_attributes.cbNumberOfCapitalizationErrors:
capitalization_errors = self.calculate_num_capitalization_errors()
print("Number of capitalization errors: ", capitalization_errors)
attribute_dictionary["numberOfCapitalizationErrors"] = capitalization_errors
# i = self._update_progressbar(callback, proportions, i)
if selected_attributes is None or selected_attributes.cbNumberOfPunctuationErrors:
punctuation_errors = self.calculate_num_punctuation_errors()
print("Number of punctuation errors: ", punctuation_errors)
attribute_dictionary["numberOfPunctuationErrors"] = punctuation_errors
# i = self._update_progressbar(callback, proportions, i)
if selected_attributes is None or selected_attributes.cbCosineSumOfCorrelationValues or selected_attributes.cbCosinePattern \
or selected_attributes.cbCosineSimilarityBestEssays or selected_attributes.cbCosineSimilarityMax \
or selected_attributes.cbCosineSimilaritySourceText:
self._cosine_preparation()
if (selected_attributes is None or selected_attributes.cbCosineSimilaritySourceText) and self.source_text is not None:
cosine_source_text = self.calculate_cosine_source_text()
print("Cosine similarity with source text: ", cosine_source_text)
attribute_dictionary["cosineWithSourceText"] = cosine_source_text
# i = self._update_progressbar(callback, proportions, i)
if selected_attributes is None or selected_attributes.cbCosineSimilarityMax:
max_similarity_scores = self.calculate_cosine_max()
print("Cosine similarity Max: ", max_similarity_scores)
attribute_dictionary["scoreCosineSimilarityMax"] = max_similarity_scores
# i = self._update_progressbar(callback, proportions, i)
if selected_attributes is None or selected_attributes.cbCosineSimilarityBestEssays:
top_essay_similarities = self.calculate_cosine_best_essays()
print("Cosine similarity with best essay: ", top_essay_similarities)
attribute_dictionary["cosineTopEssaySimilarityAverage"] = top_essay_similarities
# i = self._update_progressbar(callback, proportions, i)
if selected_attributes is None or selected_attributes.cbCosinePattern:
cos_patterns = self.calculate_cosine_pattern()
print("Cosine Patterns: ", cos_patterns)
attribute_dictionary["cosinePattern"] = cos_patterns
# i = self._update_progressbar(callback, proportions, i)
if selected_attributes is None or selected_attributes.cbCosineSumOfCorrelationValues:
cos_weighted_sum = self.calculate_cosine_correlation_values()
print("cos_weighted_sum: ", cos_weighted_sum)
attribute_dictionary["cosineSumOfCorrelationValues"] = cos_weighted_sum
# i = self._update_progressbar(callback, proportions, i)
return i
def calculate_num_spellcheck_errors(self):
"""
Calculates number of spellchecking errors.
:return: Number of spellchecking errors of each essay.
"""
errors = np.array([len(self.spellchecker.unknown(tokens)) for tokens in self.corpus.tokens])
return errors
def calculate_num_capitalization_errors(self):
"""
Calculates number of capitalization errors.
:return: Number of capitalization errors of each essay.
"""
capitalization_errors = [sum([1 for e in doc_errors if e.category == "Capitalization"])
for doc_errors in self.lang_check_errors]
return capitalization_errors
def calculate_num_punctuation_errors(self):
"""
Calculates number of punctuation errors.
:return: Number of punctuation errors of each essay.
"""
punctuation_errors = [
sum([1 for e in doc_errors if e.category == "Punctuation" or e.category == "Miscellaneous"])
for doc_errors in self.lang_check_errors]
return punctuation_errors
def calculate_cosine_source_text(self):
"""
Calculates cosine similarity with source text.
:return: Cosine similarity with source text for each essay.
"""
return self.cosine[-1][:-1]
def calculate_cosine_max(self):
"""
Calculates/"predicts" grade of essay by taking the grade of the essay it's most similar to (based on cosine similarity).
:return: Grades of essays that essays are most similar to.
"""
max_similarity_scores = []
# Two cases: 1. if we are calculating attributes for an already graded essay
# then we proceed normally via leave one out strategy
# (we have no graded corpus AND have essay scores)
# 2. if we are calculating attributes for ungraded essays
# then we have to compare ungraded essays to graded essays (self.cosine_graded) IF they are provided
# (we have graded corpus AND have essay scores)
#
# Otherwise return empty array
if self.graded_corpus is None and self.essay_scores:
# Attributes for graded essay
for ii in range(len(self.corpus.documents)):
row = self.cosine[ii][:-1]
most_similar_doc_index = list(row).index(sorted(row, reverse=True)[1])
max_similarity_scores.append(self.essay_scores[most_similar_doc_index])
return max_similarity_scores
elif self.essay_scores:
# Attributes for ungraded essay
for ii in range(len(self.corpus.documents)):
row = self.cosine_graded[ii][:]
most_similar_doc_index = list(row).index(sorted(row, reverse=True)[0])
# print(ii, most_similar_doc_index)
max_similarity_scores.append(self.essay_scores[most_similar_doc_index])
return max_similarity_scores
else:
return np.zeros(len(self.corpus.documents))
def calculate_cosine_best_essays(self):
"""
Calculates cosine similarity with top 5 essays of the corpus.
:return: Cosine similarities with top essays.
"""
# TODO: now takes 5 essays, change to relative (eg 5%)
# If we have no essay scores, it means we are processing ungraded cropus only, return zeros
if not self.essay_scores:
return np.zeros(len(self.corpus.documents))
        # np.argsort yields distinct indices even when several essays share the top score
        top_essay_indexes = list(np.argsort(self.essay_scores)[::-1][:5])
top_essay_similarities = []
# Two cases again
if self.graded_corpus is None:
for ii in range(len(self.corpus.documents)):
c = [self.cosine[ii][x] for x in top_essay_indexes if x != ii]
# if len(c) == 0:
# c = 0
# else:
# c = sum(c) / len(c)
c = sum(c) / max(1, len(c))
top_essay_similarities.append(c)
else:
for ii in range(len(self.corpus.documents)):
c = [self.cosine_graded[ii][x] for x in top_essay_indexes]
# if len(c) == 0:
# c = 0
# else:
# c = sum(c) / len(c)
c = sum(c) / max(1, len(c))
top_essay_similarities.append(c)
return top_essay_similarities
def calculate_cosine_pattern(self):
"""
Calculates "Cosine pattern".
:return: Cosine pattern for each essay.
"""
if not self.essay_scores:
return np.zeros(len(self.corpus.documents))
        # sum(score*ranking); the ranking can be either 1. the rank order by similarity, or 2. the similarities in order
cos_patterns = []
n_max = int(max(self.essay_scores)) + 1
n_min = int(min(self.essay_scores))
# Two cases again
for ii in range(len(self.corpus.documents)):
            # score points run from 1 to 12 in the original data (n_min to n_max here)
cos_correlation_values = []
for sp in range(n_min, n_max):
sp_essays = [index for index, value in enumerate(self.essay_scores) if value == sp and index != ii]
if self.graded_corpus is None:
sp_cos = [self.cosine[ii][x] for x in sp_essays]
else:
sp_cos = [self.cosine_graded[ii][x] for x in sp_essays]
if len(sp_cos) == 0:
sp_cos = 0
else:
sp_cos = sum(sp_cos) / len(sp_cos)
cos_correlation_values.append(sp_cos)
cos_ratings = np.argsort(np.argsort(cos_correlation_values)) + 1 # twice, to get correct 'rankings'
cos_score_points = np.array(range(n_min, n_max))
cos_pattern = np.sum(cos_score_points * cos_ratings)
# NORMALIZATION - works
pat_max = np.dot(np.array(range(n_min, n_max)), np.array(range(n_min, n_max)))
pat_min = np.dot(np.array(range(n_min, n_max)), np.flip(np.array(range(n_min, n_max))))
cos_pattern = (n_max - n_min - 1) * (cos_pattern - pat_min) / (pat_max - pat_min) + n_min
cos_patterns.append(cos_pattern)
return cos_patterns
def calculate_cosine_correlation_values(self):
"""
Calculates "Cosine correlation values".
:return: Cosine weighted sum of correlation values for each essay.
"""
if not self.essay_scores:
return np.zeros(len(self.corpus.documents))
        # add up the cosines of the upper half of score points, subtract the cosines of the lower half
cos_weighted_sum = []
n_max = int(max(self.essay_scores)) + 1
n_min = int(min(self.essay_scores))
# Two cases again
for ii in range(len(self.corpus.documents)):
cos_correlation_values = []
for sp in range(n_min, n_max):
sp_essays = [index for index, value in enumerate(self.essay_scores) if value == sp and index != ii]
if self.graded_corpus is None:
sp_cos = [self.cosine[ii][x] for x in sp_essays]
else:
sp_cos = [self.cosine_graded[ii][x] for x in sp_essays]
if len(sp_cos) == 0:
sp_cos = 0
else:
sp_cos = sum(sp_cos) / len(sp_cos) # TODO: do we use average or just the highest???
cos_correlation_values.append(sp_cos)
# Now we have an array of cosine values (lowest to highest SP), define lower and upper half and subtract
half = int((n_max - n_min) / 2)
cos_weighted_sum.append(sum(cos_correlation_values[half:]) - sum(cos_correlation_values[:half]))
return cos_weighted_sum
# For optimization resaons, we check for grammar errors in Content.py module instead of Grammar.py module.
def calculate_num_grammar_errors(self):
"""
Calculates number of grammar errors.
For optimization reasons, this method is in this module. It would be more precise if it was in Grammar.py.
:return:
"""
        # TODO: https://www.tutorialspoint.com/python/python_spelling_check.htm
grammar_errors = [sum([1 for e in doc_errors if e.category == "Grammar"])
for doc_errors in self.lang_check_errors]
return grammar_errors
| UTF-8 | Python | false | false | 20,303 | py | 87 | Content.py | 52 | 0.625129 | 0.623061 | 0 | 430 | 46.216279 | 145 |
CurtisSlone/SchoolProjects | 1,443,109,053,203 | a259d2c46ed76a558b77353ac66a418066b59f9d | 1ca933b302447f0bf37c7e2011c20b152d0d3c0c | /Python/Flask_Website/app.py | 0a8af24384efa33072b6457a8086f0c20452722d | []
| no_license | https://github.com/CurtisSlone/SchoolProjects | eb851bd717efadb28f88cafe76d750eea3826b72 | 04d8c641d967615549931a94609811003b40a608 | refs/heads/main | 2023-07-05T17:10:01.587093 | 2021-08-19T21:14:26 | 2021-08-19T21:14:26 | 398,051,539 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
File: app.py
Author: Curtis Slone
Date: 12 Feb 2021
Purpose: Basic Flask Website With Password Entry and Input Validation
"""
import random
from datetime import datetime
import json
from flask import Flask, render_template, request, jsonify, redirect, url_for
from passlib.hash import sha256_crypt
from flask_login import LoginManager, login_required, \
current_user, login_user, logout_user, UserMixin
app = Flask(__name__)
app.config['SECRET_KEY'] = 'thisismysecretkeydonotstealit'
login_manager = LoginManager()
login_manager.init_app(app)
#################
### ROUTES ###
#################
@app.route('/')
def home():
"""Return hompage"""
return render_template('home.html')
@app.route('/sign-up', methods=['POST'])
def sign_up():
""" Process Form"""
name = request.form['name']
password = request.form['pass']
email = request.form['email']
### Create Model Object, Hash Password Add To Flat File ##
if name and password and email:
new_member = User()
new_member.id = random.randrange(400)
new_member.name = name
new_member.password = do_hash_password(password)
new_member.email = email
send_model_object_to_flat_file(new_member)
return json.dumps({'success' : 'Go ahead and sign-in to access the member\'s area!'})
return json.dumps({'error' : 'Missing data. Check your information!'})
@app.route('/member')
@login_required
def member():
""" member only area """
return render_template('members.html',name=current_user.name)
@app.route('/password-change')
@login_required
def password_change():
""" Change password """
return render_template('password.html',name=current_user.name)
@app.route('/email-check')
def email_check():
""" check members file for currently used email """
tmp = return_members_list_from_flat_file()
tmp_dict = {}
i = 0
for mem in tmp:
email = mem.get('EMAIL')
tmp_dict[i] = email
i += 1
return jsonify(tmp_dict)
@app.route('/password-check', methods=['POST'])
@login_required
def password_check():
""" compare common paswords file and new-password input"""
    with open("CommonPassword.txt") as common_file:
        pass_list = [x.strip() for x in common_file.readlines()]
old_password = request.form['old_password']
new_password = request.form['new_password']
verify_new_pass = request.form['verify_new_pass']
members_list = return_members_list_from_flat_file()
hashed_password = " "
for memb in members_list:
if memb['EMAIL'] == current_user.email:
if check_hash_password(old_password, memb['PASSHASH']):
hashed_password = memb['PASSHASH']
break
try:
check_hash_password(old_password,hashed_password)
except ValueError:
error = "Incorrect Old Password"
return jsonify({'error' : error})
if new_password != verify_new_pass:
error = "New Passwords Don't Match"
return jsonify({'error' : error})
for i in pass_list:
if i == new_password:
error = "New Password Matched Compromised Password"
return jsonify({'error' : error})
for memb in members_list:
if memb['EMAIL'] == current_user.email:
memb['PASSHASH'] = do_hash_password(new_password)
jsond = {
"members" : members_list
}
with open('members.json','w') as write_file:
json.dump(jsond,write_file,indent=4)
return jsonify({'success' : 'You successfully changed your password'})
@app.route('/login', methods=['POST'])
def login():
"""login form """
ip_address = request.remote_addr
now = datetime.now()
date_time = now.strftime("%d/%m/%Y, %H:%M:%S")
email = request.form['email']
password = request.form['pass']
user = False
hash_match = False
temp = return_members_list_from_flat_file()
for memb in temp:
if memb['EMAIL'] == email:
user = True
if check_hash_password(password, memb['PASSHASH']):
hash_match = True
break
if user and hash_match:
member_to_login = get_member_object(temp,email)
login_user(member_to_login)
return jsonify({'success' : 'Awesome! Redirecting you now.'})
if user and not hash_match:
        with open('log.txt', 'a') as file_object:
            file_object.write('\n\\\\ FAILED LOGIN : ' + date_time + ' IP: ' + ip_address + '\n')
        return jsonify({'error' : 'Invalid Password'})
if not user and not hash_match:
        with open('log.txt', 'a') as file_object:
            file_object.write('\n\\\\ FAILED LOGIN : ' + date_time + ' IP: ' + ip_address + '\n')
return jsonify({'error' : 'Couldn\'t log you in. Check your information or sign-up!'})
@app.route('/logout')
@login_required
def logout():
""" Log User Out """
logout_user()
return redirect(url_for('home'))
#################
## USER LOADER ##
#################
@login_manager.user_loader
def load_user(user_id):
""" Load User based on member id """
temp = return_members_list_from_flat_file()
for memb in temp:
if memb['ID'] == int(user_id):
loaded_member = get_member_object(temp,memb['EMAIL'])
return loaded_member
return jsonify("User not loaded")
#################
## APP METHODS ##
#################
def do_hash_password(pass_string):
""" hash password """
return sha256_crypt.hash(pass_string)
def check_hash_password(pass_string, hash_string):
""" unhash password """
return sha256_crypt.verify(pass_string,hash_string)
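# e.g. check_hash_password('secret', do_hash_password('secret')) returns True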
def send_model_object_to_flat_file(new_user):
""" Convert object to JSON format and dump onto file"""
raw_data = {
'ID' : new_user.id,
'NAME' : new_user.name,
'PASSHASH' : new_user.password,
'EMAIL' : new_user.email
}
temp = return_members_list_from_flat_file()
temp.append(raw_data)
jsond = {
"members" : temp
}
with open('members.json','w') as write_file:
json.dump(jsond,write_file,indent=4)
def return_members_list_from_flat_file():
""" return mambers list """
with open("members.json","r") as read_file:
data = json.load(read_file)
return data['members']
def search_member(member_email):
""" search member based on member_email """
members_list = return_members_list_from_flat_file()
for memb in members_list:
if member_email == memb['EMAIL']:
return memb['EMAIL']
return jsonify({'error' : 'Email not found'})
def get_member_object(memberlist,member_email):
""" get user object from flat file base on email"""
for memb in memberlist:
if memb['EMAIL'] == member_email:
user_login = User()
user_login.id = memb['ID']
user_login.name = memb['NAME']
user_login.email = memb['EMAIL']
return user_login
return json.dumps({'Error' : 'Unable to find member information'})
#################
## MODELS ##
#################
class User(UserMixin):
""" User Class """
id = " "
name = " "
password = " "
email = " "
| UTF-8 | Python | false | false | 7,156 | py | 45 | app.py | 31 | 0.603969 | 0.600755 | 0 | 201 | 34.58209 | 94 |
HyShai/youtube-dl | 18,545,668,830,196 | e405a9ec1cc0c4929d5f1cbfe2475364aee02700 | 0255d8532888ffee9c5d2fe9bb737a304efabff9 | /youtube_dl/extractor/everyonesmixtape.py | d872d828fcc8e10fea4770e1e56ab21cda027336 | [
"Unlicense",
"LicenseRef-scancode-public-domain"
]
| permissive | https://github.com/HyShai/youtube-dl | d1b81ae13a5d2032a706ff1b291a606f033faf8c | 59fc89b704e7f9e8ad9c881c32e111ba5e79daba | refs/heads/ytdl-pythonista | 2020-02-24T13:11:50.972561 | 2016-05-27T17:02:51 | 2016-05-27T17:02:51 | 29,873,426 | 48 | 9 | Unlicense | true | 2017-12-11T08:04:20 | 2015-01-26T17:45:34 | 2017-10-08T12:44:37 | 2016-05-27T17:02:52 | 27,352 | 21 | 6 | 2 | Python | false | null | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class EveryonesMixtapeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'
_TESTS = [{
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
"info_dict": {
'id': '5bfseWNmlds',
'ext': 'mp4',
"title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
"uploader": "FKR.TV",
"uploader_id": "frenchkissrecords",
"description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
"upload_date": "20081015"
},
'params': {
'skip_download': True, # This is simply YouTube
}
}, {
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi',
'info_dict': {
'id': 'm7m0jJAbMQi',
'title': 'Driving',
},
'playlist_count': 24
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
pllist_req = compat_urllib_request.Request(pllist_url)
pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')
playlist_list = self._download_json(
pllist_req, playlist_id, note='Downloading playlist metadata')
try:
playlist_no = next(playlist['id']
for playlist in playlist_list
if playlist['code'] == playlist_id)
except StopIteration:
raise ExtractorError('Playlist id not found')
pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
pl_req = compat_urllib_request.Request(pl_url)
pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
playlist = self._download_json(
pl_req, playlist_id, note='Downloading playlist info')
entries = [{
'_type': 'url',
'url': t['url'],
'title': t['title'],
} for t in playlist['tracks']]
if mobj.group('songnr'):
songnr = int(mobj.group('songnr')) - 1
return entries[songnr]
playlist_title = playlist['mixData']['name']
return {
'_type': 'playlist',
'id': playlist_id,
'title': playlist_title,
'entries': entries,
}
| UTF-8 | Python | false | false | 2,872 | py | 335 | everyonesmixtape.py | 333 | 0.560237 | 0.548398 | 0 | 79 | 35.35443 | 285 |
pseudoPixels/SciWorCS | 15,195,594,332,351 | 728eacc27557bc9514edfcf6efdf0e6d562d3d8b | 893f83189700fefeba216e6899d42097cc0bec70 | /bioinformatics/photoscan-pro/python/lib/python3.5/site-packages/pyside2uic/objcreator.py | deed44f78dadd42ca5225aed8d3364096f95a0ee | [
"GPL-3.0-only",
"Apache-2.0",
"MIT",
"Python-2.0"
]
| permissive | https://github.com/pseudoPixels/SciWorCS | 79249198b3dd2a2653d4401d0f028f2180338371 | e1738c8b838c71b18598ceca29d7c487c76f876b | refs/heads/master | 2021-06-10T01:08:30.242094 | 2018-12-06T18:53:34 | 2018-12-06T18:53:34 | 140,774,351 | 0 | 1 | MIT | false | 2021-06-01T22:23:47 | 2018-07-12T23:33:53 | 2018-12-06T20:27:54 | 2021-06-01T22:23:45 | 593,244 | 0 | 1 | 4 | Python | false | false | # This file is part of the PySide project.
#
# Copyright (C) 2009-2011 Nokia Corporation and/or its subsidiary(-ies).
# Copyright (C) 2010 Riverbank Computing Limited.
# Copyright (C) 2009 Torsten Marek
#
# Contact: PySide team <pyside@openbossa.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
import sys
import os.path
from pyside2uic.exceptions import NoSuchWidgetError, WidgetPluginError
if sys.hexversion >= 0x03000000:
from pyside2uic.port_v3.load_plugin import load_plugin
else:
from pyside2uic.port_v2.load_plugin import load_plugin
# The list of directories that are searched for widget plugins. This is
# exposed as part of the API.
widgetPluginPath = [os.path.join(os.path.dirname(__file__), 'widget-plugins')]
MATCH = True
NO_MATCH = False
MODULE = 0
CW_FILTER = 1
class QObjectCreator(object):
def __init__(self, creatorPolicy):
self._cpolicy = creatorPolicy
self._cwFilters = []
self._modules = [self._cpolicy.createQtWidgetsWrapper()]
# Get the optional plugins.
for plugindir in widgetPluginPath:
try:
plugins = os.listdir(plugindir)
except:
plugins = []
for filename in plugins:
if not filename.endswith('.py') or filename == '__init__.py':
continue
filename = os.path.join(plugindir, filename)
plugin_globals = {
"MODULE": MODULE,
"CW_FILTER": CW_FILTER,
"MATCH": MATCH,
"NO_MATCH": NO_MATCH}
plugin_locals = {}
if load_plugin(open(filename), plugin_globals, plugin_locals):
pluginType = plugin_locals["pluginType"]
if pluginType == MODULE:
modinfo = plugin_locals["moduleInformation"]()
self._modules.append(self._cpolicy.createModuleWrapper(*modinfo))
elif pluginType == CW_FILTER:
self._cwFilters.append(plugin_locals["getFilter"]())
else:
raise WidgetPluginError("Unknown plugin type of %s" % filename)
self._customWidgets = self._cpolicy.createCustomWidgetLoader()
self._modules.append(self._customWidgets)
def createQObject(self, classname, *args, **kwargs):
classType = self.findQObjectType(classname)
if classType:
return self._cpolicy.instantiate(classType, *args, **kwargs)
raise NoSuchWidgetError(classname)
def invoke(self, rname, method, args=()):
return self._cpolicy.invoke(rname, method, args)
def findQObjectType(self, classname):
for module in self._modules:
w = module.search(classname)
if w is not None:
return w
return None
def getSlot(self, obj, slotname):
return self._cpolicy.getSlot(obj, slotname)
def addCustomWidget(self, widgetClass, baseClass, module):
for cwFilter in self._cwFilters:
match, result = cwFilter(widgetClass, baseClass, module)
if match:
widgetClass, baseClass, module = result
break
self._customWidgets.addCustomWidget(widgetClass, baseClass, module)
| UTF-8 | Python | false | false | 3,925 | py | 877 | objcreator.py | 426 | 0.627771 | 0.616561 | 0 | 112 | 34.044643 | 89 |
lan2720/my_blog | 18,992,345,409,030 | 1ab4fc74d1de0fcedefb22ddb2feed65f49a5f2c | a281e6960b313c019b98d5a7afb4f79cdee6ca16 | /about_me/migrations/0002_about_created_time.py | b7928ddb1c0908d3e8793c27cacfb2e69b876613 | []
| no_license | https://github.com/lan2720/my_blog | b3988f895ba197968153d9f5bc569e1d24b72239 | f5db989929e60bc974fe60a931a07804acc1e863 | refs/heads/master | 2020-12-24T14:55:50.452891 | 2015-04-28T03:20:50 | 2015-04-28T03:20:50 | 34,705,733 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('about_me', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='about',
name='created_time',
field=models.DateTimeField(default=datetime.datetime(2015, 4, 26, 14, 2, 39, 853922), auto_now_add=True),
preserve_default=False,
),
]
| UTF-8 | Python | false | false | 514 | py | 18 | 0002_about_created_time.py | 11 | 0.599222 | 0.554475 | 0 | 21 | 23.47619 | 117 |
Gary2018X/data_processing_primer | 4,174,708,253,098 | 604bb8f50ea1c4c8f9439ce6e13b0bb3dcba0703 | 8ecd1d9d1760acbf1f9b999363a5d150460e1c2a | /E11-8.py | 92bb97f303a575dee494ab240865f13de9898d1d | []
| no_license | https://github.com/Gary2018X/data_processing_primer | 0910c7c37af7ed03546520303c7d1660c62b0254 | b0d2cb16b4fcaed08f01accb4695232a33131d28 | refs/heads/master | 2023-01-14T17:39:48.856606 | 2020-11-17T03:21:32 | 2020-11-17T03:21:32 | 298,432,953 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# author:Gary
import pandas as pd
from sklearn.model_selection import train_test_split
# ******** Decision tree: implementation ******#
mush_data = pd.read_csv("agaricus-lepiota.data", header=None)
mush_data.columns = ["classes", "cap_shape", "cap_surface",
                     "cap_color", "odor", "bruises",
                     "gill_attachment", "gill_spacing",
                     "gill_size", "gill_color", "stalk_shape",
                     "stalk_root", "stalk_surface_above_ring",
                     "stalk_surface_below_ring",
                     "stalk_color_above_ring",
                     "stalk_color_below_ring", "veil_type", "veil_color", "ring_number", "ring_type",
                     "spore_print_color", "population", "habitat"]
# Convert categorical variables whose categories have no numeric order (e.g. color)
# into dummy variables with yes/no or 0/1 attribute values
mush_data_dummy = pd.get_dummies(
    mush_data[["gill_color", "gill_attachment", "odor", "cap_color"]])
# Create the target variable flg, i.e. add an flg column to the DataFrame mush_data_dummy,
# whose value is 1 or 0 depending on whether the same row's classes column is "p".
mush_data_dummy["flg"] = mush_data["classes"].map(lambda x: 1 if x == "p" else 0)
# Specify the features and the target, i.e. split the DataFrame mush_data_dummy into data and labels:
X = mush_data_dummy.drop("flg", axis=1)
Y = mush_data_dummy["flg"]
# Split into training and test sets
train_X, test_X, train_y, test_y = train_test_split(X, Y, random_state=42)
# Import the model from the library/package
from sklearn.tree import DecisionTreeClassifier
# Build the model
model = DecisionTreeClassifier()
# Fit the model
model.fit(train_X, train_y)
# Compute and print the accuracy
print(model.score(test_X, test_y))
| UTF-8 | Python | false | false | 1,758 | py | 101 | E11-8.py | 100 | 0.619624 | 0.612903 | 0 | 42 | 34.428571 | 101 |
knighton/sunyata_2017 | 7,842,610,333,373 | 6885f11918ffb2714fd2d68b3bbb85c51b66b82a | 525c6a69bcf924f0309b69f1d3aff341b06feb8e | /sunyata/backend/pytorch/layer/dot/__init__.py | cc8a470e8689de85a29459f4e6bc79ee2604bdb6 | []
| no_license | https://github.com/knighton/sunyata_2017 | ba3af4f17184d92f6277d428a81802ac12ef50a4 | 4e9d8e7d5666d02f9bb0aa9dfbd16b7a8e97c1c8 | refs/heads/master | 2021-09-06T13:19:06.341771 | 2018-02-07T00:28:07 | 2018-02-07T00:28:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from ....base.layer.dot import BaseDotAPI
from .conv import PyTorchConvAPI
from .dense import PyTorchDenseAPI
class PyTorchDotAPI(BaseDotAPI, PyTorchConvAPI, PyTorchDenseAPI):
def __init__(self):
BaseDotAPI.__init__(self)
PyTorchConvAPI.__init__(self)
PyTorchDenseAPI.__init__(self)
| UTF-8 | Python | false | false | 313 | py | 253 | __init__.py | 253 | 0.70607 | 0.70607 | 0 | 10 | 30.3 | 65 |
tobsval/coding-challenges | 12,816,182,429,573 | 62af71f1f5ddf222b5f205ea6ca22b20f79ba7a9 | 232a0afd9be460ffb55ca01742f42920923d9663 | /human_readable_time.py | 589a567d5e2be6e1a63b2421412848e132aba66f | []
| no_license | https://github.com/tobsval/coding-challenges | 8bc6ed921bf1e56ecaff83024dda3ac184824acb | 9add5a77e91b3a8f8583b3306c76e2b476010cf5 | refs/heads/master | 2020-04-18T14:52:19.510947 | 2019-03-30T11:07:01 | 2019-03-30T11:07:01 | 167,599,938 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #https://www.codewars.com/kata/human-readable-time/train/python
def make_readable(seconds : int):
mins, secs = divmod(seconds, 60)
hours, mins = divmod(mins, 60)
readable_time = '{:d}:{:02d}:{:02d}'.format(hours, mins, secs)
return readable_time
| UTF-8 | Python | false | false | 264 | py | 8 | human_readable_time.py | 8 | 0.666667 | 0.636364 | 0 | 7 | 36.571429 | 66 |
ashrystein/Python-Assignments-Implementation-of-Andrew-Ng-s-Machine-Learning-Course | 8,546,984,952,957 | 0763730a3534dc230302a47f58b1cae12c7204cc | 662bb1f69e37e6db2823122ccdbd8a0404e2f05f | /week8_ex7/pca.py | eef5f19891e86d30155c7d7ab85abe5c8f88a3aa | []
| no_license | https://github.com/ashrystein/Python-Assignments-Implementation-of-Andrew-Ng-s-Machine-Learning-Course | c9c9f731fafcc2195f687f299c95e1796196a1cd | b55de1a39e033df5e09c3a3f65ab5fb04ec714c1 | refs/heads/master | 2020-06-08T06:14:52.395961 | 2019-08-15T15:58:53 | 2019-08-15T15:58:53 | 193,175,025 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as imag
from scipy.io import loadmat
import scipy.optimize as opt
from sklearn.svm import SVC
from findClosestCentroids import findClosestCentroids
from computeCentroids import computeCentroids
from runkMeans import runkMeans
from kMeansInitCentroids import kMeansInitCentroids
from featureNormalize import featureNormalize
def pca(X):
(m,n) = X.shape
U = np.zeros(n)
S = np.zeros(n)
sigma = (1/m)*(X.T@X)
U, S, V = np.linalg.svd(sigma)
return U,S,V | UTF-8 | Python | false | false | 570 | py | 68 | pca.py | 66 | 0.766667 | 0.764912 | 0 | 20 | 27.55 | 53 |
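# note: X is expected to be feature-normalized (see featureNormalize) before calling pca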
downneck/vyvyan | 5,463,198,439,208 | fed579765d90b7cb5af9c1c584a8357a6ff27852 | 31a77b7603fdf3c9ca3a8fb19f9d11ff2a930836 | /vyvyan/ldap/__init__.py | 9d75a3b98087d6eedb5b61725521c71ab70f6cf7 | [
"Apache-2.0"
]
| permissive | https://github.com/downneck/vyvyan | b29905cc0f21acd9fb6a964e237205ca6bbb383b | e5375b9ed049d47cff69618359ce45159b53853b | refs/heads/master | 2021-01-10T12:20:00.338832 | 2016-09-01T18:36:01 | 2016-09-01T18:36:01 | 43,097,262 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2015 WebEffects Network, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this module holds methods for dealing with LDAP
this should only, generally speaking, be used to talk with an LDAP server
all user interaction should be done in the vyvyan.API_userdata module
"""
# imports
import os
import ldap
import vyvyan.validate
import vyvyan.API_userdata as userdata
# db imports
from vyvyan.vyvyan_models import *
class LDAPError(Exception):
pass
def ld_connect(cfg, server):
"""
[description]
open a connection to an LDAP server
[parameter info]
required:
cfg: the config object. useful everywhere
server: server to connect to
[return value]
returns the open ldap connection object
"""
try:
# stitch together some useful info
admin_dn = "cn=%s,dc=%s" % (cfg.ldap_admin_cn, ',dc='.join(cfg.default_domain.split('.')))
ld_server_string = "ldaps://"+server
# init the connection to the ldap server
ldcon = ldap.initialize(ld_server_string)
ldcon.simple_bind_s(admin_dn, cfg.ldap_admin_pass)
except ldap.LDAPError, e:
cfg.log.debug("error connecting to ldap server: %s" % ldap_master)
cfg.log.debug("INFO DUMP:\n")
cfg.log.debug("admin_dn: %s\nld_server_string: %s" % (admin_dn, ld_server_string))
raise LDAPError(e)
# we managed to open a connection. return it so it can be useful to others
return ldcon
def uadd(cfg, user, server=None):
"""
[description]
add a user to ldap
[parameter info]
required:
cfg: the config object. useful everywhere
user: the ORM user object
optional:
server: restrict activity to a single server
[return value]
returns "success"
"""
# just checking....
if user:
if not user.active:
raise LDAPError("user %s is not active. please set the user active, first." % user.username)
else:
raise LDAPError("empty ORM object passed for user")
# stitch together the LDAP, fire it into the ldap master server
try:
# construct an array made of the domain parts, stitch it back together in a way
# that LDAP will understand
domain_parts = user.domain.split('.')
dn = "uid=%s,ou=%s,dc=" % (user.username, cfg.ldap_users_ou)
dn += ',dc='.join(domain_parts)
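        # e.g. for user "jdoe" in domain "example.com" with a users OU of "People",
        # dn becomes "uid=jdoe,ou=People,dc=example,dc=com"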
add_record = [('objectclass', ['inetOrgPerson','person','ldapPublicKey','posixAccount'])]
full_name = user.first_name + " " + user.last_name
if user.ssh_public_key:
attributes = [('gn', user.first_name),
('sn', user.last_name),
('gecos', full_name),
('cn', full_name),
('uid', user.username),
('uidNumber', str(user.uid)),
('gidNumber', cfg.ldap_default_gid),
('homeDirectory', user.hdir),
('loginShell', user.shell),
('mail', user.email),
('sshPublicKey', user.ssh_public_key),
]
else:
attributes = [('gn', user.first_name),
('sn', user.last_name),
('gecos', full_name),
('cn', full_name),
('uid', user.username),
('uidNumber', str(user.uid)),
('gidNumber', cfg.ldap_default_gid),
('homeDirectory', user.hdir),
('loginShell', user.shell),
('mail', user.email),
]
add_record += attributes
        # connect to ldap server(s) and do stuff
        if server:
            servers = [server]
        else:
            servers = cfg.ldap_servers

        for myserver in servers:
            # create a connection to the server
            ldcon = ld_connect(cfg, myserver)
            print "adding ldap user entry for user %s to domain %s" % (user.username, user.domain)
            ldcon.add_s(dn, add_record)
            # close the LDAP connection for this server before moving on
            ldcon.unbind()

        # give something back to the community
        return "success"

    except ldap.LDAPError, e:
        # don't leave dangling connections
        ldcon.unbind()
        raise LDAPError(e)
def uremove(cfg, user, server=None):
"""
[description]
remove a user
[parameter info]
required:
cfg: the config object. useful everywhere
user: the ORM user object
optional:
server: restrict activity to a single server
[return value]
returns "success"
"""
# just checking...
if not user:
raise LDAPError("empty ORM object passed for user")
# do the needful
try:
# construct an array made of the domain parts, stitch it back together in a way
# that LDAP will understand
domain_parts = user.domain.split('.')
udn = "uid=%s,ou=%s,dc=" % (user.username, cfg.ldap_users_ou)
udn += ',dc='.join(domain_parts)
# connect to ldap server(s) and do stuff
if server:
servers = [server]
else:
servers = cfg.ldap_servers
        for myserver in servers:
            # create a connection to the server
            ldcon = ld_connect(cfg, myserver)
ldcon.delete_s(udn)
ldcon.unbind()
# give something back to the community
return "success"
except ldap.LDAPError, e:
ldcon.unbind()
raise LDAPError(e)
def urefresh_all(cfg, server=None):
"""
[description]
refresh the LDAP users database. drop all users, add them back in again
[parameter info]
required:
cfg: the config object. useful everywhere
optional:
server: restrict activity to a single server
[return value]
returns "success"
"""
# some vars we'll need later
domainlist = []
userlist = []
# do the needful
try:
# construct our user list
for user in cfg.dbsess.query(Users).\
filter(Users.active==True).all():
userlist.append(user)
if user.domain not in domainlist:
domainlist.append(user.domain)
# suss out the server situation
if server:
servers = [server]
else:
servers = cfg.ldap_servers
# connect to ldap server(s) and do stuff
for myserver in servers:
for domain in domainlist:
# make an array of the domain parts. stitch it back together in a way
# ldap will understand
domain_parts = domain.split('.')
udn ="ou=%s,dc=" % cfg.ldap_users_ou
udn += ',dc='.join(domain_parts)
# create a connection to the server
ldcon = ld_connect(cfg, myserver)
search = '(objectClass=person)'
# ALL USERS BALEETED
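                # search_s returns a list of (dn, attrs) tuples, so result[0]
                # is the DN of each matching entry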
for result in ldcon.search_s(udn, ldap.SCOPE_SUBTREE, search):
ldcon.delete_s(result[0])
# unbind thyself
ldcon.unbind()
# add the users back in
for user in userlist:
uadd(cfg, user, server=myserver)
# give something back to the community
return "success"
# something horrible has happened.
except ldap.LDAPError, e:
ldcon.unbind()
raise LDAPError(e)
# we may get rid of this completely...
def uupdate(cfg, user, server=None):
"""
[description]
update a user entry
[parameter info]
required:
cfg: the config object. useful everywhere
user: the ORM user object
optional:
server: restrict activity to a single server
[return value]
    returns "success"
"""
try:
# construct an array made of the domain parts, stitch it back together in a way
# that LDAP will understand
domain_parts = user.domain.split('.')
dn = "uid=%s,ou=%s,dc=" % (user.username, cfg.ldap_users_ou)
dn += ',dc='.join(domain_parts)
# we only really care about active users
if not user.active:
raise LDAPError("user %s is not active. please set the user active, first." % u.username)
# connect ldap server(s) and do stuff
if server:
servers = [server]
else:
servers = cfg.ldap_servers
# stitch together the LDAP, fire it into the ldap master server
full_name = user.first_name + " " + user.last_name
if user.ssh_public_key:
mod_record = [(ldap.MOD_REPLACE, 'gn', user.first_name),
(ldap.MOD_REPLACE, 'sn', user.last_name),
(ldap.MOD_REPLACE, 'gecos', full_name),
(ldap.MOD_REPLACE, 'cn', full_name),
(ldap.MOD_REPLACE, 'uidNumber', str(user.uid)),
(ldap.MOD_REPLACE, 'gidNumber', cfg.ldap_default_gid),
(ldap.MOD_REPLACE, 'homeDirectory', user.hdir),
(ldap.MOD_REPLACE, 'loginShell', user.shell),
(ldap.MOD_REPLACE, 'mail', user.email),
(ldap.MOD_REPLACE, 'sshPublicKey', user.ssh_public_key),
]
else:
mod_record = [(ldap.MOD_REPLACE, 'gn', user.first_name),
(ldap.MOD_REPLACE, 'sn', user.last_name),
(ldap.MOD_REPLACE, 'gecos', full_name),
(ldap.MOD_REPLACE, 'cn', full_name),
(ldap.MOD_REPLACE, 'uidNumber', str(user.uid)),
(ldap.MOD_REPLACE, 'gidNumber', cfg.ldap_default_gid),
(ldap.MOD_REPLACE, 'homeDirectory', user.hdir),
(ldap.MOD_REPLACE, 'loginShell', user.shell),
(ldap.MOD_REPLACE, 'mail', user.email),
]
# do the needful, once for each server in the array
        for myserver in servers:
            # create a connection to the server
            ldcon = ld_connect(cfg, myserver)
            print "updating ldap user entry for user %s on domain %s" % (user.username, user.domain)
            ldcon.modify_s(dn, mod_record)
            # close the LDAP connection for this server before moving on
            ldcon.unbind()
# give something back to the community
return "success"
except ldap.LDAPError, e:
ldcon.unbind()
raise LDAPError(e)
def gadd(cfg, group, server=None):
"""
[description]
add a group
[parameter info]
required:
cfg: the config object. useful everywhere
group: the ORM group object
optional:
server: restrict activity to a single server
[return value]
returns "success"
"""
try:
if group:
# construct an array made of the domain parts, stitch it back together in a way
# that LDAP will understand to create our group
domain_parts = group.domain.split('.')
gdn = "cn=%s,ou=%s,dc=" % (group.groupname, cfg.ldap_groups_ou)
gdn += ',dc='.join(domain_parts)
ngdn = "cn=%s,ou=%s,dc=" % (group.groupname, cfg.ldap_netgroups_ou)
ngdn += ',dc='.join(domain_parts)
# construct the list of users in this group three ways
# ACHTUNG: we may not need both memberlist and memberoflist. test this!
            memberoflist = [] # bare usernames, for the posixGroup memberUid attribute
            memberlist = []   # full user DNs, for the groupOfNames member attribute
netgrouplist = [] # nisNetgroups are still a thing?
# iterate over all users assigned to this group
for ugmap in cfg.dbsess.query(UserGroupMapping).\
filter(UserGroupMapping.groups_id==group.id):
user = cfg.dbsess.query(Users).\
filter(Users.id==ugmap.users_id).first()
                # construct the memberUid list (bare usernames) for posixGroup
                memberoflist.append(user.username)
                # construct the member list (full user DNs) for groupOfNames
                ldap_user_dn = "uid=%s,ou=%s,dc=" % (user.username, cfg.ldap_users_ou)
                ldap_user_dn += ',dc='.join(domain_parts)
                memberlist.append(ldap_user_dn)
# construct the netgroup list for nisNetgroup
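                # nisNetgroupTriple format is "(host,user,domain)": here the host
                # field is "-" and the domain field is left empty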
netgrouplist.append("(-,%s,)" % user.username)
else:
raise LDAPError("group object not supplied, aborting")
# connect ldap server(s) and do stuff
if server:
servers = [server]
else:
servers = cfg.ldap_servers
for myserver in servers:
# construct the Group record to add
g_add_record = [('objectClass', ['top', 'posixGroup', 'groupOfNames'])]
if memberoflist:
g_attributes = [('description', group.description),
('cn', group.groupname),
('gidNumber', str(group.gid)),
('memberUid', memberoflist),
('member', memberlist),
]
else:
g_attributes = [('description', group.description),
('cn', group.groupname),
('gidNumber', str(group.gid)),
]
# construct the nisNetgroup record to add
            ng_add_record = [('objectClass', ['top', 'nisNetgroup'])]
if netgrouplist:
ng_attributes = [('description', group.description),
('cn', group.groupname),
('nisNetgroupTriple', netgrouplist),
]
else:
ng_attributes = [('description', group.description),
('cn', group.groupname),
]
# stitch the records together
g_add_record += g_attributes
ng_add_record += ng_attributes
# create a connection to the ldap server
ldcon = ld_connect(cfg, myserver)
# talk about our feelings
print "adding ldap Group record for %s" % (gdn)
print "adding ldap nisNetgroup record for %s" % (ngdn)
# slam the records into the server
ldcon.add_s(gdn, g_add_record)
ldcon.add_s(ngdn, ng_add_record)
ldcon.unbind()
# give something back to the community
return "success"
except ldap.LDAPError, e:
ldcon.unbind()
raise LDAPError(e)
def gupdate(cfg, group, server=None):
"""
[description]
update a group
[parameter info]
required:
cfg: the config object. useful everywhere
group: the ORM group object
optional:
server: restrict activity to a single server
[return value]
returns "success"
"""
try:
if group:
# construct an array made of the domain parts, stitch it back together in a way
# that LDAP will understand to create our group
domain_parts = group.domain.split('.')
gdn = "cn=%s,ou=%s,dc=" % (group.groupname, cfg.ldap_groups_ou)
gdn += ',dc='.join(domain_parts)
ngdn = "cn=%s,ou=%s,dc=" % (group.groupname, cfg.ldap_netgroups_ou)
ngdn += ',dc='.join(domain_parts)
# construct the list of users in this group three ways
# ACHTUNG: we may not need both memberlist and memberoflist. test this!
            memberoflist = [] # bare usernames, for the posixGroup memberUid attribute
            memberlist = []   # full user DNs, for the groupOfNames member attribute
netgrouplist = [] # nisNetgroups are still a thing?
# iterate over all users assigned to this group
for ugmap in cfg.dbsess.query(UserGroupMapping).\
filter(UserGroupMapping.groups_id==group.id):
user = cfg.dbsess.query(Users).\
filter(Users.id==ugmap.users_id).first()
# construct the memberOf list for groupOfNames
memberoflist.append(user.username)
# construct the member list for posixGroup
            ldap_user_dn = "uid=%s,ou=%s,dc=" % (user.username, cfg.ldap_users_ou)
ldap_user_dn += ',dc='.join(domain_parts)
memberlist.append(ldap_user_dn)
# construct the netgroup list for nisNetgroup
netgrouplist.append("(-,%s,)" % user.username)
else:
raise LDAPError("group object not supplied, aborting")
# connect ldap server(s) and do stuff
if server:
servers = [server]
else:
servers = cfg.ldap_servers
for myserver in servers:
# construct the Group record to add
if memberoflist:
g_attributes = [(ldap.MOD_REPLACE, 'description', group.description),
(ldap.MOD_REPLACE, 'gidNumber', str(group.gid)),
(ldap.MOD_REPLACE, 'memberUid', memberoflist),
(ldap.MOD_REPLACE, 'member', memberlist),
]
else:
g_attributes = [(ldap.MOD_REPLACE, 'description', group.description),
(ldap.MOD_REPLACE, 'gidNumber', str(group.gid)),
]
# construct the nisNetgroup record to add
if netgrouplist:
ng_attributes = [(ldap.MOD_REPLACE, 'description', group.description),
(ldap.MOD_REPLACE, 'nisNetgroupTriple', netgrouplist),
]
else:
ng_attributes = [(ldap.MOD_REPLACE, 'description', group.description),
]
# create a connection to the ldap server
ldcon = ld_connect(cfg, myserver)
# talk about our feelings
print "updating ldap Group record for %s" % (gdn)
print "updating ldap nisNetgroup record for %s" % (ngdn)
# slam the records into the server
ldcon.modify_s(gdn, g_attributes)
ldcon.modify_s(ngdn, ng_attributes)
ldcon.unbind()
# give something back to the community
return "success"
except ldap.LDAPError, e:
ldcon.unbind()
raise LDAPError(e)
def gremove(cfg, group, server=None):
"""
[description]
remove a group
[parameter info]
required:
cfg: the config object. useful everywhere
group: the ORM group object
optional:
server: restrict activity to a single server
[return value]
returns "success"
"""
try:
if group:
# construct an array made of the domain parts, stitch it back together in a way
# that LDAP will understand to create our group
domain_parts = group.domain.split('.')
gdn = "cn=%s,ou=%s,dc=" % (group.groupname, cfg.ldap_groups_ou)
gdn += ',dc='.join(domain_parts)
ngdn = "cn=%s,ou=%s,dc=" % (group.groupname, cfg.ldap_netgroups_ou)
ngdn += ',dc='.join(domain_parts)
else:
raise LDAPError("group object not supplied, aborting")
# connect ldap server(s) and do stuff
if server:
servers = [server]
else:
servers = cfg.ldap_servers
for myserver in servers:
# create a connection to the ldap server
ldcon = ld_connect(cfg, myserver)
# talk about our feelings
print "removing ldap Group record for %s" % (gdn)
print "removing ldap nisNetgroup record for %s" % (ngdn)
# slam the records into the server
ldcon.delete_s(gdn)
ldcon.delete_s(ngdn)
ldcon.unbind()
# give something back to the community
return "success"
except ldap.LDAPError, e:
ldcon.unbind()
raise LDAPError(e)
def grefresh_all(cfg, server=None):
"""
[description]
refresh the LDAP groups database. drop all groups, add them back in again
[parameter info]
required:
cfg: the config object. useful everywhere
optional:
server: restrict activity to a single server
[return value]
returns "success"
"""
# some vars we'll need later
domainlist = []
glist = []
nglist = []
grouplist = []
# do the needful
try:
# construct our group list
        for group in cfg.dbsess.query(Groups).all():
grouplist.append(group)
if group.domain not in domainlist:
domainlist.append(group.domain)
# suss out the server situation
if server:
servers = [server]
else:
servers = cfg.ldap_servers
# connect to ldap server(s) and do stuff
for myserver in servers:
for domain in domainlist:
# make an array of the domain parts. stitch it back together in a way
# ldap will understand
domain_parts = domain.split('.')
gdn ="ou=%s,dc=" % cfg.ldap_groups_ou
gdn += ',dc='.join(domain_parts)
ngdn ="ou=%s,dc=" % cfg.ldap_netgroups_ou
ngdn += ',dc='.join(domain_parts)
# create a connection to the server
ldcon = ld_connect(cfg, myserver)
# ALL GROUPS BALEETED
search = '(objectClass=posixGroup)'
for result in ldcon.search_s(gdn, ldap.SCOPE_SUBTREE, search):
ldcon.delete_s(result[0])
# ALL NETGROUPS BALEETED
search = '(objectClass=nisNetgroup)'
for result in ldcon.search_s(ngdn, ldap.SCOPE_SUBTREE, search):
ldcon.delete_s(result[0])
# unbind thyself
ldcon.unbind()
# add the groups back in
for group in grouplist:
gadd(cfg, group, server=myserver)
# give something back to the community
return "success"
# something horrible has happened.
except ldap.LDAPError, e:
ldcon.unbind()
raise LDAPError(e)
def ldapimport(cfg, domain=None, server=None):
"""
[description]
import ldap data into vyvyan (DANGEROUS)
in fact this is so dangerous, i'm leaving
it unlinked for the moment. will need a LOT
more sanity checking before it can be allowed
into service
[parameter info]
required:
cfg: the config object. useful everywhere
optional:
domain: just import a single domain's worth of userdata
server: manually set the server to import from. leaving blank will pick from your configured list.
[return value]
returns "success"
"""
# some vars we'll need later
domainlist = []
grouplist = {}
netgrouplist = {}
    userlist = {}
    sudoerslist = {}
# check to see if groups or users tables are populated already. if so, bail out.
# we don't want to overwrite information already in vyvyan's database.
try:
if len(cfg.dbsess.query(Groups).all()) > 0 or len(cfg.dbsess.query(Users).all()) > 0:
raise LDAPError("Refusing to import into a populated database")
# suss out the server situation
if not server:
server = cfg.ldap_servers[0]
# suss out the domain situation
if not domain:
# connect to ldap server and grab a list of all domains (ie. namingContext)
ldcon = ld_connect(cfg, server)
search_filter = '(objectClass=namingContext)'
for result in ldcon.search_s('', ldap.SCOPE_BASE, search_filter, None):
# i kind of don't know that this will work. hopeful though.
# turn 'dc=' notation into a regular dotted domain
domainlist.append(result[1].replace(',dc=', '.').replace('dc=', ''))
ldcon.unbind()
else:
domainlist = [domain]
# iterate over discovered domains
for domain in domainlist:
# validate the thing to make sure we didn't get something insane
v_domain(domain)
# add the domain to the grouplist and userlist
grouplist[domain] = []
netgrouplist[domain] = []
userlist[domain] = []
sudoerslist[domain] = []
# make an array of the domain parts. stitch it back together in a way
# ldap will understand
# users dn
domain_parts = domain.split('.')
udn ="ou=%s,dc=" % cfg.ldap_users_ou
udn += ',dc='.join(domain_parts)
# groups dn
gdn ="ou=%s,dc=" % cfg.ldap_groups_ou
gdn += ',dc='.join(domain_parts)
# netgroups dn
ngdn ="ou=%s,dc=" % cfg.ldap_netgroups_ou
ngdn += ',dc='.join(domain_parts)
# sudoers dn
sdn ="ou=%s,dc=" % cfg.ldap_sudoers_ou
sdn += ',dc='.join(domain_parts)
# connect to ldap server and do stuff
ldcon = ld_connect(cfg, server)
# first, harvest groups
# don't know that we need this. keeping it here for posterity
#attr = ['memberUid', 'gidNumber', 'description', 'cn']
for result in ldcon.search_s(gdn, ldap.SCOPE_SUBTREE, '(objectClass=posixGroup)', None):
if result[1]:
grouplist[domain].append(result[1])
# next, harvest netgroups
# TODO: do we need these?
for result in ldcon.search_s(ngdn, ldap.SCOPE_SUBTREE, '(objectClass=nisNetgroup)', None):
if result[1]:
netgrouplist[domain].append(result[1])
# next, harvest users
for result in ldcon.search_s(udn, ldap.SCOPE_SUBTREE, '(objectClass=posixAccount)', None):
if result[1]:
userlist[domain].append(result[1])
# finally, harvest sudoers info
for result in ldcon.search_s(sdn, ldap.SCOPE_SUBTREE, '(objectClass=sudoRole)', None):
if result[1]:
sudoerslist[domain].append(result[1])
# clean up after ourselves
ldcon.unbind()
# need to do a bunch of parsing and re-arranging of sudoers to hopefully
# figure out what group needs what sudo commands and suchlike
sudoergrouplist = {}
sudoeruserlist = {}
for sudoer in sudoerslist[domain]:
if sudoer['sudoUser']:
for entry in sudoer['sudoUser']:
if '%' in entry:
sudoergrouplist[entry.replace('%', '')] = {'sudoHost': sudoer['sudoHost'], 'sudoCommand': sudoer['sudoCommand']}
else:
sudoeruserlist[entry] = {'sudoHost': sudoer['sudoHost'], 'sudoCommand': sudoer['sudoCommand']}
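        # (in sudoers syntax a leading '%' marks a group name, which is why
        # '%'-prefixed sudoUser entries are routed into sudoergrouplist above)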
# create our groups first
# TODO: figure out how to inject SSHA passwords into the db
for domain in grouplist.keys():
for group in grouplist[domain]:
query = {'domain': domain,
'groupname': group['cn'],
'gid': group['gidNumber'],
'description': group['description'],
'sudo_cmds': [],
}
# figure out if the group has sudo commands associated with it
# NOTE: we don't make user-based sudoers entries so we'll need a way
# to inform the end user that there are commands that will be lost
if group['cn'] in sudoergrouplist.keys():
query['sudo_cmds'] = sudoergrouplist[group['cn']]['sudoCommand']
userdata.gadd(cfg, query)
# if this domain has users, make 'em.
if domain in userlist.keys():
for user in userlist[domain]:
                query = {'username': user['uid'],
                         'uid': user['uidNumber'],
                         }
if 'gn' in user and user['gn']:
query['first_name'] = user['gn']
if 'sn' in user and user['sn']:
query['last_name'] = user['sn']
if 'homeDirectory' in user and user['homeDirectory']:
query['hdir'] = user['homeDirectory']
if 'loginShell' in user and user['loginShell']:
query['shell'] = user['loginShell']
if 'mail' in user and user['mail']:
query['email'] = user['mail']
#TODO: figure out ssh keys
#TODO: add users to groups with utog
#MARK
# this shit ain't nearly done
# pull the user data out of the ldap schema
username = result[0][1]['uid'][0]
if result[0][1]['sshPublicKey']:
# THIS SHOULD USE THE LIST VERSION, NOT A SINGLE MEMBER
# in case they have multiple keys
ssh_public_key = result[0][1]['sshPublicKey'][0]
else:
ssh_public_key = None
uid = result[0][1]['uidNumber'][0]
first_name, last_name = result[0][1]['gecos'][0].split(None, 1)
hdir = result[0][1]['homeDirectory'][0]
shell = result[0][1]['loginShell'][0]
email = result[0][1]['mail'][0]
# ldap doesn't store user type data so
# use the default type
user_type = cfg.def_user_type
# create a new user object and fire it into
# the db, face first.
u = Users(first_name, last_name, ssh_public_key, username, site_id, realm, uid, user_type, hdir, shell, email, True)
cfg.dbsess.add(u)
cfg.dbsess.commit()
# map the user we just created into the
# group we're currently operating on
ugmap = UserGroupMapping(g.id, u.id)
cfg.dbsess.add(ugmap)
cfg.dbsess.commit()
else:
print "User \"%s\" not created. The user is in a group in LDAP but does not actually exist in LDAP.\nMost likely this is a system user (such as \"nobody\" or \"apache\") that should not exist in LDAP." % user
# if the group doesn't exist, create it then map any users it
# may have into the vyvyan group definition
else:
groupname = result[1]['cn'][0]
print "group \"%s\" does not exist, creating from ldap info" % (groupname+'.'+fqn)
description = result[1]['description']
gid = int(result[1]['gidNumber'][0])
g = Groups(description, groupname, site_id, realm, gid)
cfg.dbsess.add(g)
cfg.dbsess.commit()
if userlist:
for user in userlist:
u = vyvyan.validate.v_get_user_obj(cfg, user+'.'+fqn)
if u:
print "mapping user \"%s\" into group \"%s\"" % (u.username+'.'+fqn, groupname+'.'+fqn)
ugmap = UserGroupMapping(g.id, u.id)
cfg.dbsess.add(ugmap)
cfg.dbsess.commit()
else:
# fetch user info from ldap, stuff into new
# user, map user into group
dn = "uid=%s,ou=%s,dc=" % (user, cfg.ldap_users_ou)
dn += ',dc='.join(d)
search = '(objectClass=person)'
try:
result = ldcon.search_s(dn, ldap.SCOPE_BASE, search)
except ldap.LDAPError, e:
print "Entity not found, skipping."
print e
result = False
if result:
# pull the user data out of the ldap schema
username = result[0][1]['uid'][0]
print "user \"%s\" does not exist, creating from ldap info and mapping into group \"%s\"" % (username+'.'+fqn, groupname+'.'+fqn)
if result[0][1]['sshPublicKey']:
ssh_public_key = result[0][1]['sshPublicKey'][0]
else:
ssh_public_key = None
uid = result[0][1]['uidNumber'][0]
first_name, last_name = result[0][1]['gecos'][0].split(None, 1)
hdir = result[0][1]['homeDirectory'][0]
shell = result[0][1]['loginShell'][0]
email = result[0][1]['mail'][0]
# ldap doesn't store user type data so
# use the default type
user_type = cfg.def_user_type
# create a new user object and fire it into
# the db, face first.
u = Users(first_name, last_name, ssh_public_key, username, site_id, realm, uid, user_type, hdir, shell, email, True)
cfg.dbsess.add(u)
cfg.dbsess.commit()
# map the user we just created into the
# group we're currently operating on
ugmap = UserGroupMapping(g.id, u.id)
cfg.dbsess.add(ugmap)
cfg.dbsess.commit()
else:
print "User \"%s\" not created. The user is in a group in LDAP but does not actually exist in LDAP.\nMost likely this is a system user (such as \"nobody\" or \"apache\") that should not exist in LDAP." % user
# now that we're done with the groups, let's go back in and make
# sure we create any leftover users that weren't in a group
dn = "ou=%s,dc=" % cfg.ldap_users_ou
dn += ',dc='.join(d)
search = '(objectClass=person)'
try:
results = ldcon.search_s(dn, ldap.SCOPE_BASE, search)
except ldap.LDAPError, e:
print "Entity not found, skipping."
print e
results = False
if results:
for result in results:
username = result[0][1]['uid'][0]
u = vyvyan.validate.v_get_user_obj(cfg, username+'.'+fqn)
if u:
print "user exists, skipping"
else:
# create a new user object and fire it into
# the db, face first.
print "user \"%s\" does not exist, creating from ldap info" % (username+'.'+fqn)
if result[0][1]['sshPublicKey']:
ssh_public_key = result[0][1]['sshPublicKey'][0]
else:
ssh_public_key = None
uid = result[0][1]['uidNumber'][0]
first_name, last_name = result[0][1]['gecos'][0].split(None, 1)
hdir = result[0][1]['homeDirectory'][0]
shell = result[0][1]['loginShell'][0]
email = result[0][1]['mail'][0]
# ldap doesn't store user type data so
# use the default type
user_type = cfg.def_user_type
u = Users(first_name, last_name, ssh_public_key, username, site_id, realm, uid, user_type, hdir, shell, email, True)
cfg.dbsess.add(u)
cfg.dbsess.commit()
# map the user we just created into the
# group we're currently operating on
ugmap = UserGroupMapping(g.id, u.id)
cfg.dbsess.add(ugmap)
cfg.dbsess.commit()
else:
print "No users found!"
# ACHTUNG! bits below here may be useful. they will probably need to be moved into userdata
def password_prompt(minchars, enctype):
    import os
    import getpass
    import hashlib
    from base64 import encodestring as encode
ans1 = getpass.getpass("Enter new passwd:")
if len(ans1) < minchars:
print 'Password is too short! Must be at least %d char(s)' % minchars
return
ans2 = getpass.getpass("Re-type new passwd:")
if ans1 != ans2:
print 'Passwords do not match!'
return
salt = os.urandom(4)
    h = hashlib.new(enctype, ans1)
h.update(salt)
return '{SSHA}' + encode(h.digest() + salt)[:-1]
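
# note: the value returned above is the standard LDAP {SSHA} layout: base64(digest + salt).
# a minimal verification sketch (assuming sha1 with its 20-byte digest):
#   from base64 import decodestring
#   raw = decodestring(stored[len('{SSHA}'):])
#   digest, salt = raw[:20], raw[20:]
#   ok = hashlib.sha1(candidate + salt).digest() == digest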
def update_ldap_passwd(cfg, username):
u = vyvyan.validate.v_get_user_obj(cfg, username)
d = cfg.domain.split('.')
if u:
ldap_master = __get_master(cfg, u.realm+'.'+u.site_id)
dn = "uid=%s,ou=%s,dc=" % (u.username, cfg.ldap_users_ou)
dn += ',dc='.join(d)
else:
raise LDAPError("user \"%s\" not found, aborting" % username)
ldcon = ld_connect(cfg, ldap_master)
try:
raw_res = ldcon.search_s(dn, ldap.SCOPE_BASE)
if 'userPassword' in raw_res[0][1].keys():
print 'User %s ALREADY has LDAP password set' % u.username
else:
print 'User %s does NOT have LDAP password set' % u.username
newpass = password_prompt(8,'sha1')
try:
ldcon.modify_s(dn, [(ldap.MOD_REPLACE, 'userPassword', newpass)])
except ldap.LDAPError, e:
raise LDAPError(e)
ldcon.unbind()
except ldap.LDAPError, e:
raise LDAPError(e)
| UTF-8 | Python | false | false | 39,353 | py | 11 | __init__.py | 10 | 0.519478 | 0.516581 | 0 | 1,002 | 38.272455 | 236 |
michaellengyel/python | 18,459,769,456,728 | 4b82861e55ec491bcf0dd7d8e41ed7767ac6be82 | 9fc537133c8e2afdde743f5428952f3796900eaa | /collections_m.py | 391de2d030aebadb9ed0b6ca30d0da1bbc13eafc | []
| no_license | https://github.com/michaellengyel/python | 2630d4fcafce66e909065b86a7b9a4da4659bd4f | d9aaf56847ca22600135c71e5456f0686d69bde7 | refs/heads/main | 2023-06-30T07:50:57.478748 | 2021-07-31T19:57:25 | 2021-07-31T19:57:25 | 382,138,432 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def main():
### LISTS ###
print("LISTS: Is a collection which is ordered and changeable. Allows duplicate members.")
m_list = []
m_list.append(1)
m_list.append("test")
m_list.append(3.42453)
m_list.append(None)
print(m_list)
print(m_list[2])
print(m_list.count(3.42453))
# List comprehension
squares = [x ** 2 for x in range(10)]
print(squares)
### TUPLES ###
print("TUPLES: Is a collection which is ordered and unchangeable. Allows duplicate members.")
m_tuple_1 = (2, 1, 4)
m_tuple_2 = (m_tuple_1, "word", 42)
m_tuple_3 = (m_tuple_2, 1, 4.4)
print(m_tuple_1)
print(m_tuple_2)
print(m_tuple_3)
print(m_tuple_1[2])
### SETS ###
print("SETS: Is a collection which is unordered and unindexed. No duplicate members.")
m_set_1 = {"red", "green", "blue"}
m_set_2 = set(("red", "green", "blue"))
# Use set() function to create a set. An empty {}
print(m_set_1)
print(m_set_2)
m_set_1.add("red")
m_set_1.add("purple")
print(m_set_1)
### DICTIONARY ###
print("DICTIONARY: Is a collection which is ordered and changeable. No duplicate members.")
m_dict_1 = {1: "red", 2: "green", 3: "blue"}
m_dict_2 = {"red": [1, 2, 3], "green": 3.2332, 3: "apple"}
print(m_dict_1)
print(m_dict_1[2])
print(len(m_dict_1))
print(m_dict_2)
print(m_dict_2["green"])
m_dict_2["gin"] = 1827
print(m_dict_2)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,504 | py | 10 | collections_m.py | 6 | 0.564495 | 0.519282 | 0 | 63 | 22.873016 | 97 |
jasper2326/HackerRank | 13,967,233,695,473 | eef08572d538996589c051cc8120a264e5121cb8 | 697627ec7a9e7ec1bceaad0c7a0da48e966630ac | /offer/_23.py | 47d3061969be70b329a12ff3f20bc9edc1a84ce9 | []
| no_license | https://github.com/jasper2326/HackerRank | ab0090cc9906472f29c91225c1cf8f1d3a19113c | 1e5bad3bc734085fc12ce1cbd126742f2be506da | refs/heads/master | 2021-05-06T09:49:22.907317 | 2018-03-27T08:51:59 | 2018-03-27T08:51:59 | 114,071,619 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def printFromTop2Bottom(self, root):
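        # BFS (level-order) traversal: visit nodes top to bottom, left to right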
result = []
if not root:
return result
queue = []
queue.append(root)
while queue:
node = queue.pop(0)
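            # note: list.pop(0) is O(n); collections.deque.popleft() would be O(1)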
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
result.append(node.val)
return result | UTF-8 | Python | false | false | 416 | py | 72 | _23.py | 72 | 0.490385 | 0.485577 | 0 | 17 | 23.529412 | 40 |
stoky123/Szkriptnyelvek | 5,660,766,916,618 | 1ab624dae26aea2051ac8209141f8825e7d07109 | 2ed43e0d98a9b5c9e2edc033ecf05b4c5a675c0e | /09/alcatraz.py | 6110a43eca17911567b6168178be63f8a07b9a96 | []
| no_license | https://github.com/stoky123/Szkriptnyelvek | 8ac625b9214410c4ef2fc59fdb1190b75d76869f | 42815760bd61ad1e3815d5ad46cf5c1744c1ae8d | refs/heads/master | 2023-05-02T08:29:09.888954 | 2021-12-26T18:26:30 | 2021-12-26T18:26:30 | 365,550,966 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
def main():
cells = [0] * 600
for i in range(len(cells)):
for j in range(i, len(cells), i+1):
cells[j] = (cells[j] + 1) % 2
print([i+1 for i in range(len(cells)) if cells[i] == 1])
##########################
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 337 | py | 53 | alcatraz.py | 52 | 0.403561 | 0.373887 | 0 | 18 | 16.833333 | 60 |
DzhonPetrus/FastAPI-test | 2,972,117,385,053 | aef9e68ab0d72704740168f54dbadc1eeff77eac | 89cfd37739c44c76c1baadad0673d8895bd76db7 | /blog/controllers/blog.py | d01c4786a88679cee9dc16d30600dc6eaefba112 | []
| no_license | https://github.com/DzhonPetrus/FastAPI-test | 4f000f39abf9817d51d20a62426570109cca8140 | 1ffcd2724698bc693bc099a93a132b958f4ec116 | refs/heads/master | 2023-07-05T14:11:56.736243 | 2021-08-22T17:39:50 | 2021-08-22T17:39:50 | 397,666,118 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fastapi import status, HTTPException
from sqlalchemy.orm import Session
from .. import schemas
from ..models import Blog
def get_all(db: Session):
blogs = db.query(Blog).all()
return blogs
def get_one(id, db: Session):
blog = db.query(Blog).filter(Blog.id == id).first()
if not blog:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Blog with id {id} not found')
return blog
def create(blog: schemas.Blog, db: Session, current_user):
new_blog = Blog(title=blog.title, body = blog.body, user_id=current_user["id"])
db.add(new_blog)
db.commit()
db.refresh(new_blog)
return new_blog
def destroy(id, db: Session):
blog = db.query(Blog).filter(Blog.id == id)
    if not blog.first():
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
else:
blog.delete(synchronize_session=False)
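        # synchronize_session=False skips refreshing in-session objects, which is
        # safe here because the session is committed and not read again afterwards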
db.commit()
return {
'detail': f'Blog with id {id} deleted'
}
def update(id, blog: schemas.Blog, db: Session):
blog = db.query(Blog).filter(Blog.id == id)
if not blog.first():
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Blog with id {id} not found')
else:
update({'title': blog.title, 'body': blog.body})
db.commit()
return {
'data': blog,
'detail': f'Blog with id {id} updated'
}
| UTF-8 | Python | false | false | 1,406 | py | 13 | blog.py | 9 | 0.620199 | 0.613798 | 0 | 53 | 25.471698 | 105 |
Asterion2014/python | 12,343,736,022,037 | fb4fd236196b8cf77fc5884d4c6d523095ad3289 | aafd3821212647c2d7883edf7361ff247380ec71 | /lesson-3.py | fcef5916be698149bf100f4e5c620e05b2d74ada | []
| no_license | https://github.com/Asterion2014/python | d93d4f1053acda60fef685cbf628aac9109cefb0 | a7d6c83d5d7e8ef62c22b44440970b7710ddf38a | refs/heads/home_work_3 | 2022-10-29T20:47:59.117776 | 2020-05-30T01:26:14 | 2020-05-30T01:26:14 | 265,492,775 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # author: Плуталов Максим Александрович
# 1. Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление.
# Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль.
def div(*args):
try:
arg1 = int(input("Введите числитель: "))
arg2 = int(input("Введите знаменатель: "))
res = arg1 / arg2
except ValueError:
return 'ValueError'
except ZeroDivisionError:
return "Ошибка! Деление на ноль недопустимо!"
return res
print(f'Результат: {div()}')
# 2. Реализовать функцию, принимающую несколько параметров, описывающих данные пользователя:
# имя, фамилия, год рождения, город проживания, email, телефон. Функция должна принимать параметры
# как именованные аргументы. Реализовать вывод данных о пользователе одной строкой.
def my_func(**kwargs):
return kwargs
print(my_func(name = input("Введите имя: "),
surname=input("Введите фамилию: "),
year=input("Введите год рождения: "),
city=input("Введите город проживания: "),
email=input("Введите адрес электронной почты: "),
phone=input("Введите номер телефона: ")))
# 3. Реализовать функцию my_func(), которая принимает три позиционных аргумента,
# и возвращает сумму наибольших двух аргументов.
import random
def my_func(*args):
arg_1 = random.randint(0, 100)
arg_2 = random.randint(0, 100)
arg_3 = random.randint(0, 100)
print('первый аргумент:', arg_1)
print('второй аргумент:', arg_2)
print('третий аргумент:', arg_3)
print('минимальный аргумент: ', min(arg_1, arg_2, arg_3))
print('сумма максимальных аргументов равна: ', arg_1+arg_2+arg_3-min(arg_1, arg_2, arg_3))
my_func()
# 4. #Программа принимает действительное положительное число x и целое отрицательное число y.
#Необходимо выполнить возведение числа x в степень y. Задание необходимо реализовать в виде функции my_func(x, y).
#При решении задания необходимо обойтись без встроенной функции возведения числа в степень.
def my_func(x, y):
res = 1
for i in range(abs(y)):
res *= x
if y >= 0:
return res
else:
return 1 / res
print(my_func(float(input("Введите х: ")), int(input("Введите y: ")))) #y вводить со знаком "-"
| UTF-8 | Python | false | false | 3,333 | py | 3 | lesson-3.py | 3 | 0.679007 | 0.662456 | 0 | 64 | 34.875 | 114 |
nhoyer96/University | 9,397,388,457,924 | c8a720f5f029837dc0638cfd26ec3285f185e432 | 0eb485a4ffe37403f6ccb89f3bb0172986c7a6f9 | /Fortgeschrittenen_Praktikum/F70/Calculations/pipe_blind.py | e880c08e407cc94a8b3efb42b3222a63282bf156 | []
| no_license | https://github.com/nhoyer96/University | c846d1ad1b594ce43d70765542c797e653b4f6d8 | 27fc101b1b67ab0fdb921e32d2a9492dfd702efb | refs/heads/master | 2018-11-29T11:05:49.621901 | 2018-11-19T17:56:42 | 2018-11-19T17:56:42 | 144,573,958 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from math import sqrt, pi
p_S = [1.0*10**-5, 3.3*10**-5, 1.0*10**-4, 3.2*10**-4, 1.0*10**-3, 3.2*10**-3, 1.0*10**-2, 3.3*10**-2, 0.12, 0.37, 1.1] #Measurement of pumping speed S
p_S_error = [10**-6, 10**-6, 10**-5, 10**-5, 10**-4, 10**-4, 10**-3, 10**-3, 10**-2, 10**-2, 0.1]
V = [0.02, 0.02, 0.06, 0.08, 0.12, 5.0, 5.0, 5.0, 10.0, 15.0, 15.0]
V_error = [0.003, 0.003, 0.003, 0.003, 0.003, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4]
t = [62.11, 8.01, 14.20, 2.95, 1.53, 19.31, 6.92, 2.98, 3.46, 2.82, 3.04]
t_error = [0.14, 0.14, 0.14, 0.14, 0.14, 0.14, 0.14, 0.14, 0.14, 0.14, 0.14]
p_vc1 = [1.0*10**-5, 3.2*10**-5, 1.0*10**-4, 3.2*10**-4, 1.0*10**-3, 3.2*10**-3, 1.0*10**-2, 3.2*10**-2, 0.1, 0.32, 1.0] # 1=pipe
p_vc1_error = [10**-6, 10**-6, 10**-5, 10**-5, 10**-4, 10**-4, 10**-3, 10**-3, 10**-2, 10**-2, 0.1]
p_a1 = [1022, 1022, 1022, 1022, 1022, 1022, 1022, 1022, 1022, 1022, 1022]
p_a1_error = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
p_top1 = [1.0*10**-4, 3.3*10**-3, 1.0*10**-2, 2.5*10**-2, 5.4*10**-2, 8.9*10**-2, 0.13, 0.19, 0.21, 0.79, 1.3]
p_top1_error = [10**-5, 10**-4, 10**-3, 10**-3, 10**-3, 10**-3, 10**-2, 10**-2, 10**-2, 10**-2, 0.1]
p_vc2 = [1.0*10**-5, 3.2*10**-5, 1.0*10**-4, 3.2*10**-4, 1.0*10**-3, 3.2*10**-3, 1.0*10**-2, 3.2*10**-2, 0.1, 0.32, 1.0] #2=blind
p_vc2_error = [10**-6, 10**-6, 10**-5, 10**-5, 10**-4, 10**-4, 10**-3, 10**-3, 10**-2, 10**-2, 0.1]
p_a2 = [1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020]
p_a2_error = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
p_top2 = [1.5*10**-4, 3.3*10**-3, 1.3*10**-2, 3.5*10**-2, 8.0*10**-2, 0.18, 0.87, 2.0, 2.6, 4.8, 9.6, 9.7]
p_top2_error = [10**-5, 10**-4, 10**-3, 10**-3, 10**-3, 10**-2, 10**-2, 0.1, 0.1, 0.1, 0.1, 0.1]
p_vc3 = [1.0*10**-5, 3.2*10**-5, 1.0*10**-4, 3.2*10**-4, 1.0*10**-3, 3.2*10**-3, 1.0*10**-2, 3.2*10**-2, 0.1, 0.32, 1.0] #3=pipe+blind
p_vc3_error = [10**-6, 10**-6, 10**-5, 10**-5, 10**-4, 10**-4, 10**-3, 10**-3, 10**-2, 10**-2, 0.1]
p_a3 = [1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018]
p_a3_error = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
p_top3 = [6.0*10**-4, 5.0*10**-3, 1.4*10**-2, 3.5*10**-2, 8.0*10**-2, 0.17, 1.0, 2.3, 3.0, 6.3, 11.0]
p_top3_error = [10**-5, 10**-4, 10**-3, 10**-3, 10**-3, 10**-2, 0.1, 0.1, 0.1, 0.1, 1]
r_pipe = 0.0158/2
r_pipe_error = 0.0001/2
l = 0.9998
l_error = 0.0010
eta = 1.71*10**-7
r_blind = 0.0021/2
r_blind_error = 0.0001/2
S1_list = []
S1_error_list = []
S2_list = []
S2_error_list = []
S3_list = []
S3_error_list = []
L_pipe_calc_list = []
L_pipe_calc_error_list = []
L_pipe_theo_list = []
L_pipe_theo_error_list = []
L_blind_calc_list = []
L_blind_calc_error_list = []
L_blind_theo_list = []
L_blind_theo_error_list = []
L_total_calc_list = []
L_total_calc_error_list = []
L_total_theo_list = []
L_total_theo_error_list = []
# wrap in list() so the tuples survive repeated iteration and indexing below
zip1 = list(zip(V, V_error, t, t_error, p_a1, p_a1_error, p_S, p_S_error, p_top1, p_top1_error, p_vc1, p_vc1_error))
zip2 = list(zip(V, V_error, t, t_error, p_a2, p_a2_error, p_S, p_S_error, p_top2, p_top2_error, p_vc2, p_vc2_error))
zip3 = list(zip(p_top1, p_top1_error, p_vc1, p_vc1_error, p_a1, p_a1_error))
zip4 = list(zip(V, V_error, t, t_error, p_a3, p_a3_error, p_S, p_S_error, p_top3, p_top3_error, p_vc3, p_vc3_error))
for i in zip1:
S1 = i[0]/i[2] * i[4]/i[6]
S1_error = np.sqrt((i[1]/i[2] * i[4]/i[6])**2 + (i[0]*i[3]/i[2]**2 * i[4]/i[6])**2 + (i[0]/i[2] * i[5]/i[6])**2 + (i[0]/i[2] * i[4]*i[7]/i[6]**2)**2)
S1_list.append(S1)
S1_error_list.append(S1_error)
for i in zip2:
S2 = i[0]/i[2] * i[4]/i[6]
S2_error = np.sqrt((i[1]/i[2] * i[4]/i[6])**2 + (i[0]*i[3]/i[2]**2 * i[4]/i[6])**2 + (i[0]/i[2] * i[5]/i[6])**2 + (i[0]/i[2] * i[4]*i[7]/i[6]**2)**2)
S2_list.append(S2)
S2_error_list.append(S2_error)
for i in zip4:
S3 = i[0]/i[2] * i[4]/i[6]
S3_error = np.sqrt((i[1]/i[2] * i[4]/i[6])**2 + (i[0]*i[3]/i[2]**2 * i[4]/i[6])**2 + (i[0]/i[2] * i[5]/i[6])**2 + (i[0]/i[2] * i[4]*i[7]/i[6]**2)**2)
S3_list.append(S3)
S3_error_list.append(S3_error)
for i in range(0, len(S1_list), 1): #zip1 #pipe
L_pipe_calc = S1_list[i]*zip1[i][10]/abs(zip1[i][8]-zip1[i][10])
l1 = S1_error_list[i]*zip1[i][10]/(zip1[i][8]-zip1[i][10])
l2 = S1_list[i]*zip1[i][8]*zip1[i][11]/(zip1[i][8]-zip1[i][10])**2
l3 = S1_list[i]*zip1[i][9]*zip1[i][10]/(zip1[i][8]-zip1[i][10])**2
L_pipe_calc_error = np.sqrt((l1)**2 + (l2)**2 + (l3)**2)
L_pipe_calc_list.append(L_pipe_calc)
L_pipe_calc_error_list.append(L_pipe_calc_error)
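# theoretical laminar (viscous) flow conductance of a pipe, Hagen-Poiseuille:
# L = pi * r^4 * (p1 + p2) / (16 * eta * l), i.e. pi * r^4 * p_mean / (8 * eta * l)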
for i in zip3:
L_pipe_theo = pi/16 * r_pipe**4 * (i[0]+i[2])/(eta*l) * 1000000
L_pipe_theo_error = pi/(16*eta) * np.sqrt((4*r_pipe**3 * (i[0]+i[2]) * r_pipe_error /l)**2 + (r_pipe**4 * i[1]/l)**2 + (r_pipe**4 * i[3]/l)**2 + (r_pipe**4 * (i[0]+i[2])*l_error /l**2)**2)
L_pipe_theo_list.append(L_pipe_theo)
L_pipe_theo_error_list.append(L_pipe_theo_error)
for i in range(0, len(S2_list), 1): #zip2 #blind
L_blind_calc = S2_list[i]*zip2[i][10]/abs(zip2[i][8]-zip2[i][10])
l1 = S2_error_list[i]*zip2[i][10]/(zip2[i][8]-zip2[i][10])
l2 = S2_list[i]*zip2[i][8]*zip2[i][11]/(zip2[i][8]-zip2[i][10])**2
l3 = S2_list[i]*zip2[i][9]*zip2[i][10]/(zip2[i][8]-zip2[i][10])**2
L_blind_calc_error = np.sqrt((l1)**2 + (l2)**2 + (l3)**2)
L_blind_calc_list.append(L_blind_calc)
L_blind_calc_error_list.append(L_blind_calc_error)
L_blind_theo = 362*r_blind**2 * 1000000
L_blind_theo_error = 362*2*r_blind*r_blind_error
L_blind_theo_list.append(L_blind_theo)
L_blind_theo_error_list.append(L_blind_theo_error)
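    # 362*r^2 (r in m, result scaled to mL/s) matches the molecular-flow aperture
    # conductance of air at room temperature, C = 11.6 l/s per cm^2 of opening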
for i in range(0, len(S3_list), 1): #zip4 #pipe+blind
L_total_calc = S3_list[i]*zip4[i][10]/abs(zip4[i][8]-zip4[i][10])
l1 = S3_error_list[i]*zip4[i][10]/(zip4[i][8]-zip4[i][10])
l2 = S3_list[i]*zip4[i][8]*zip4[i][11]/(zip4[i][8]-zip4[i][10])**2
l3 = S3_list[i]*zip4[i][9]*zip4[i][10]/(zip4[i][8]-zip4[i][10])**2
L_total_calc_error = np.sqrt((l1)**2 + (l2)**2 + (l3)**2)
L_total_calc_list.append(L_total_calc)
L_total_calc_error_list.append(L_total_calc_error)
for i in range(0, len(L_pipe_theo_list), 1): # combine the theoretical pipe and blind conductances in series
L_total_theo = L_pipe_theo_list[i]* L_blind_theo_list[i] /(L_pipe_theo_list[i] + L_blind_theo_list[i])
L_total_theo_error = np.sqrt((L_pipe_theo_list[i]**2 / (L_pipe_theo_list[i] + L_blind_theo_list[i])**2 * L_blind_theo_error_list[i])**2 + ((L_blind_theo_list[i]**2 / (L_pipe_theo_list[i] + L_blind_theo_list[i])**2 * L_pipe_theo_error_list[i]))**2)
L_total_theo_list.append(L_total_theo)
L_total_theo_error_list.append(L_total_theo_error)
print(L_total_theo_list)
print(L_total_calc_list)
#pipe2
plt.figure(figsize=(19.20, 10.80))
plt.errorbar(p_S, L_pipe_calc_list, xerr = p_S_error, yerr = L_pipe_calc_error_list, fmt = "o", label="Calculated values")
plt.errorbar(p_vc1, L_pipe_theo_list, xerr = p_vc1_error, yerr = L_pipe_theo_error_list, fmt = "o", label="Theoretical values")
plt.xscale("log")
plt.grid(True)
plt.xlabel("pressure inside the vacuum chamber in [mbar]", fontsize=15)
plt.ylabel("Conductance in [mL/s]", fontsize=15)
plt.legend(bbox_to_anchor=(0.5, 0.9), loc=1, borderaxespad=0.1)
plt.savefig("pipe.png")
#blind2
plt.figure(figsize=(19.20, 10.80))
plt.errorbar(p_S, L_blind_calc_list, xerr = p_S_error, yerr = L_blind_calc_error_list, fmt = "o", label="Calculated values")
plt.errorbar(p_vc2, L_blind_theo_list, xerr = p_vc2_error, yerr = L_blind_theo_error_list, fmt = "o", label="Theoretical values")
plt.xscale("log")
plt.grid(True)
plt.xlabel("pressure inside the vacuum chamber in [mbar]", fontsize=15)
plt.ylabel("Conductance in [mL/s]", fontsize=15)
plt.legend(bbox_to_anchor=(0.5, 0.9), loc=1, borderaxespad=0.1)
plt.savefig("blind.png")
#pipe+blind
plt.figure(figsize=(19.20, 10.80))
plt.errorbar(p_S, L_total_calc_list, xerr = p_S_error, yerr = L_total_calc_error_list, fmt = "o", label="Calculated values")
plt.errorbar(p_vc3, L_total_theo_list, xerr = p_vc3_error, yerr = L_total_theo_error_list, fmt = "o", label="Theoretical values")
plt.xscale("log")
plt.grid(True)
plt.xlabel("pressure inside the vacuum chamber in [mbar]", fontsize=15)
plt.ylabel("Conductance in [mL/s]", fontsize=15)
plt.legend(bbox_to_anchor=(0.5, 0.9), loc=1, borderaxespad=0.1)
plt.savefig("pipe+blind.png")
| UTF-8 | Python | false | false | 8,204 | py | 102 | pipe_blind.py | 15 | 0.577157 | 0.417114 | 0 | 177 | 45.344633 | 249 |
bobur554396/PPII2021Spring | 3,968,549,821,851 | 13385922c4c106c426b127f1baa0245240c82781 | 2497427584b543bb3263ca82a1b7e4506f9b6ec9 | /w4/2.py | 96a9d11e3f49c217d80f1be2e05c8240b3d1b032 | []
| no_license | https://github.com/bobur554396/PPII2021Spring | 10deb3c654eeb9966f7106887181387a7d72783d | 07f4e89b74e59da520162391359bcaaf4f457c23 | refs/heads/main | 2023-04-07T20:53:56.412834 | 2021-04-17T09:56:38 | 2021-04-17T09:56:38 | 333,086,949 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # RegEx - Regular Expression
import re
txt = "The rain in Spain"
x = re.find
all("in", txt)
print(x)
| UTF-8 | Python | false | false | 105 | py | 63 | 2.py | 60 | 0.647619 | 0.647619 | 0 | 9 | 10.555556 | 28 |
cafischer/analyze_in_vivo | 6,811,818,140,598 | 889d56e84bb069315a9438dd801f774be411ff68 | 15514b8cdb9ef2bb25a33e44a2abe79e5eb86439 | /analyze_in_vivo/analyze_schmidt_hieber/theta_and_ramp.py | 43e20b61b37564cf617117a19a14fa6a60a04387 | []
| no_license | https://github.com/cafischer/analyze_in_vivo | 389ce0d51c6cbeb3e39648aaff13263f0c99060a | e38e1057420b5329504f7095f1ee89e2a293df23 | refs/heads/master | 2021-06-10T00:18:47.741793 | 2019-09-14T08:47:53 | 2019-09-14T08:47:53 | 100,512,098 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division
import numpy as np
import os
from analyze_in_vivo.load import load_full_runs
from grid_cell_stimuli.remove_APs import remove_APs
from grid_cell_stimuli.ramp_and_theta import get_ramp_and_theta, plot_spectrum, plot_v_ramp_theta
from grid_cell_stimuli.downsample import antialias_and_downsample
from scipy.signal import spectrogram
import matplotlib.pyplot as pl
from analyze_in_vivo.spatial_firing_rate import get_spatial_firing_rate, identify_firing_fields, \
get_start_end_idxs_in_out_field_in_time
if __name__ == '__main__':
save_dir = '../results/schmidthieber/full_traces/theta_and_ramp'
data_dir = '../data/'
cell_ids = ["20101031_10o31c", "20110513_11513", "20110910_11910b",
"20111207_11d07c", "20111213_11d13b", "20120213_12213"]
# parameters
cutoff_ramp = 3 # Hz
cutoff_theta_low = 5 # Hz
cutoff_theta_high = 11 # Hz
transition_width = 1 # Hz
ripple_attenuation = 60.0 # db
params = {'cutoff_ramp': cutoff_ramp, 'cutoff_theta_low': cutoff_theta_low,
'cut_off_theta_high': cutoff_theta_high, 'transition_width': transition_width,
'ripple_attenuation': ripple_attenuation}
max_ramp = []
min_ramp = []
max_theta = []
min_theta = []
for i, cell_id in enumerate(cell_ids):
# load
v, t, x_pos, y_pos, pos_t, speed, speed_t = load_full_runs(data_dir, cell_id)
dt = t[1] - t[0]
AP_threshold = np.min(v) + 2./3 * np.abs(np.min(v) - np.max(v)) - 5
# remove APs
v_APs_removed = remove_APs(v, t, AP_threshold, t_before=3, t_after=6)
# downsample
cutoff_freq = 2000 # Hz
v_downsampled, t_downsampled, filter = antialias_and_downsample(v_APs_removed, dt, ripple_attenuation=60.0,
transition_width=5.0,
cutoff_freq=cutoff_freq,
dt_new_max=1. / cutoff_freq * 1000)
dt_downsampled = t_downsampled[1] - t_downsampled[0]
# get ramp and theta
ramp, theta, t_ramp_theta, filter_ramp, filter_theta = get_ramp_and_theta(v_downsampled, dt_downsampled,
ripple_attenuation,
transition_width, cutoff_ramp,
cutoff_theta_low,
cutoff_theta_high,
pad_if_to_short=True)
plot_v_ramp_theta(v_downsampled, t_downsampled, ramp, theta, t_ramp_theta, None)
#plot_spectrum(v_downsampled, ramp, theta, dt, None)
cutoff_freq = 200 # Hz
v_downsampled, t_downsampled, filter = antialias_and_downsample(v_APs_removed, dt, ripple_attenuation=60.0,
transition_width=5.0,
cutoff_freq=cutoff_freq,
dt_new_max=1. / cutoff_freq * 1000)
dt_downsampled = t_downsampled[1] - t_downsampled[0]
frequencies, t_spec, spectogram = spectrogram(v_downsampled, fs=1 / dt_downsampled * 1000)
t_spec *= 1000
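        # scipy returns t_spec in seconds (fs was given in Hz); convert to ms to match t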
theta_range_bool = np.logical_and(cutoff_theta_low <= frequencies, frequencies <= cutoff_theta_high)
pl.figure()
pl.pcolormesh(t_spec, frequencies[theta_range_bool], spectogram[theta_range_bool, :])
pl.ylabel('Frequency (Hz)')
pl.xlabel('Time (ms)')
#pl.ylim(0, 40)
pl.colorbar()
pl.tight_layout()
#pl.show()
# plot power spectrum for in-field vs out-field
spatial_firing_rate, positions, loc_spikes = get_spatial_firing_rate(v, t, y_pos, pos_t, h=3,
AP_threshold=AP_threshold, bin_size=0.5,
track_len=np.max(y_pos))
in_field_idxs_per_field, out_field_idxs_per_field = identify_firing_fields(spatial_firing_rate,
fraction_from_peak_rate=0.10)
start_end_idx_in_field, start_end_idx_out_field = get_start_end_idxs_in_out_field_in_time(t_spec,
positions, y_pos,
pos_t,
in_field_idxs_per_field,
out_field_idxs_per_field)
spectograms_in_field = []
for start_idx, end_idx in start_end_idx_in_field:
spectograms_in_field.append(spectogram[:, start_idx:end_idx])
spectograms_in_field = np.hstack(spectograms_in_field)
spectogram_in_field_avg = np.mean(spectograms_in_field, axis=1)
spectograms_out_field = []
for start_idx, end_idx in start_end_idx_out_field:
spectograms_out_field.append(spectogram[:, start_idx:end_idx])
spectograms_out_field = np.hstack(spectograms_out_field)
spectogram_out_field_avg = np.mean(spectograms_out_field, axis=1)
pl.figure()
pl.plot(frequencies, spectogram_in_field_avg, 'orange', label='in-field')
pl.plot(frequencies, spectogram_out_field_avg, 'b', label='out-field')
pl.xlabel('Frequency (Hz)')
pl.ylabel('Average Power')
pl.legend()
pl.tight_layout()
pl.show() | UTF-8 | Python | false | false | 6,101 | py | 150 | theta_and_ramp.py | 149 | 0.49205 | 0.46894 | 0 | 114 | 52.526316 | 123 |
iandennismiller/ledgerhelpers | 6,038,724,050,251 | e04c29b511faea950cb6cfbb7fe56eaf164c0c1c | c5f3b7efe56335bc3bc9028d6119bacea5747c0a | /test/test_parser.py | e6f177edd384f1ef405d8ebd0174652738602dfa | []
| no_license | https://github.com/iandennismiller/ledgerhelpers | 8fe2dee87598a1e4f6a597f2f6bfe29b327731ab | 25691f78d6a47029b8e1ea932379bdd8937904c8 | refs/heads/master | 2021-08-28T09:02:07.668481 | 2017-12-11T19:33:26 | 2017-12-11T19:33:26 | 113,881,845 | 0 | 0 | null | true | 2017-12-11T17:04:49 | 2017-12-11T16:25:33 | 2017-12-11T16:25:35 | 2017-12-11T16:28:56 | 314 | 0 | 0 | 1 | Python | false | null | import datetime
import ledgerhelpers.parser as parser
import test.test_base as base
from unittest import TestCase as T
class TestParser(T):
def test_simple_transaction(self):
c = base.data("simple_transaction.dat")
items = parser.lex_ledger_file_contents(c)
self.assertEqual(len(items), 3)
for n, tclass in enumerate([
parser.TokenWhitespace,
parser.TokenTransaction,
parser.TokenWhitespace,
]):
self.assertIsInstance(items[n], tclass)
transaction = items[1]
self.assertEqual(transaction.date, datetime.date(2015, 3, 12))
self.assertEqual(transaction.clearing_date, datetime.date(2015, 3, 15))
self.assertEqual(transaction.payee, "beer")
for n, (ac, am) in enumerate([
("Accounts:Cash", "-6.00 CHF"),
("Expenses:Drinking", "6.00 CHF"),
]):
self.assertEqual(transaction.postings[n].account, ac)
self.assertEqual(transaction.postings[n].amount, am)
def test_no_end_value(self):
c = base.data("no_end_value.dat")
items = parser.lex_ledger_file_contents(c)
self.assertEqual(len(items), 5)
for n, tclass in enumerate([
parser.TokenWhitespace,
parser.TokenTransaction,
parser.TokenWhitespace,
parser.TokenTransaction,
parser.TokenWhitespace,
]):
self.assertIsInstance(items[n], tclass)
for transaction in (items[1], items[3]):
self.assertEqual(transaction.payee, "beer")
for n, (ac, am) in enumerate([
("Accounts:Cash", "-6.00 CHF"),
("Expenses:Drinking", ""),
]):
self.assertEqual(transaction.postings[n].account, ac)
self.assertEqual(transaction.postings[n].amount, am)
def test_with_comments(self):
c = base.data("with_comments.dat")
items = parser.lex_ledger_file_contents(c)
self.assertEqual(len(items), 3)
for n, tclass in enumerate([
parser.TokenWhitespace,
parser.TokenTransaction,
parser.TokenWhitespace,
]):
self.assertIsInstance(items[n], tclass)
transaction = items[1]
self.assertEqual(transaction.date, datetime.date(2011, 12, 25))
self.assertEqual(transaction.clearing_date, datetime.date(2011, 12, 25))
self.assertEqual(transaction.payee, "a gift!")
self.assertEqual(transaction.state, parser.STATE_CLEARED)
for n, (ac, am) in enumerate([
("Assets:Metals", "1 \"silver coin\" @ $55"),
("Income:Gifts", "$ -55"),
]):
self.assertEqual(transaction.postings[n].account, ac)
self.assertEqual(transaction.postings[n].amount, am)
def test_my_data_file(self):
try:
c = base.data("/home/user/.ledger")
except IOError:
return
items = parser.lex_ledger_file_contents(c)
| UTF-8 | Python | false | false | 3,044 | py | 16 | test_parser.py | 16 | 0.587385 | 0.570631 | 0 | 78 | 38.025641 | 80 |
usnistgov/core_oaipmh_harvester_app | 9,010,841,434,475 | 147407e8c89eba0130661a4718e65ddada08ca82 | 78dbae82469a0a3a0b8be66da2d6c4b8c9ab894a | /tests/components/oai_verbs/tests_unit.py | d365416ef3e0c6bbf41099f4c41ae47b6e96ebd3 | [
"NIST-Software",
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | https://github.com/usnistgov/core_oaipmh_harvester_app | 07a9c4d5c6b7078c10aab8e675318f71703353fd | bc5e31a9d7e5f66e34340230ae17a3cc2d08e7e7 | refs/heads/master | 2023-09-01T08:48:47.677916 | 2023-07-14T00:41:54 | 2023-07-14T00:46:46 | 97,263,716 | 1 | 2 | NOASSERTION | false | 2021-07-06T18:36:16 | 2017-07-14T18:48:57 | 2021-07-06T18:35:36 | 2021-07-06T18:35:33 | 344 | 1 | 2 | 2 | Python | false | false | """ Unit Test oai_verbs
"""
import requests
from rest_framework import status
from unittest.case import TestCase
from unittest.mock import patch
import core_oaipmh_harvester_app.components.oai_verbs.api as oai_verbs_api
from core_oaipmh_common_app.commons import exceptions as oai_pmh_exceptions
from core_oaipmh_harvester_app.components.oai_harvester_metadata_format.models import (
OaiHarvesterMetadataFormat,
)
from core_oaipmh_harvester_app.components.oai_harvester_set.models import (
OaiHarvesterSet,
)
from core_oaipmh_harvester_app.components.oai_identify.models import (
OaiIdentify,
)
from tests.components.oai_registry.fixtures.fixtures import OaiPmhMock
from tests.test_settings import SSL_CERTIFICATES_DIR
class TestIdentifyAsObject(TestCase):
"""Test Identify As Object"""
@patch.object(
oai_verbs_api.transform_operations,
"transform_dict_identifier_to_oai_identifier",
)
@patch.object(oai_verbs_api, "identify")
def test_identify_as_object_return_object_and_ok_status(
self, mock_identify, mock_transform
):
"""test_identify_as_object_return_object_and_ok_status"""
# Arrange
mock_identify.return_value = [], status.HTTP_200_OK
mock_transform.return_value = OaiIdentify()
# Act
data, status_code = oai_verbs_api.identify_as_object("")
# Assert
self.assertIsInstance(data, OaiIdentify)
self.assertEqual(status_code, status.HTTP_200_OK)
class TestListMetadataFormatsAsObject(TestCase):
"""Test List Metadata Formats As Object"""
@patch.object(
oai_verbs_api.transform_operations,
"transform_dict_metadata_format_to_oai_harvester_metadata_format",
)
@patch.object(oai_verbs_api, "list_metadata_formats")
def test_list_metadata_formats_as_object_return_object_and_ok_status(
self, mock_metadata_format, mock_transform
):
"""test_list_metadata_formats_as_object_return_object_and_ok_status"""
# Arrange
mock_metadata_format.return_value = [], status.HTTP_200_OK
mock_transform.return_value = [
OaiHarvesterMetadataFormat(),
OaiHarvesterMetadataFormat(),
]
# Act
data, status_code = oai_verbs_api.list_metadata_formats_as_object("")
# Assert
self.assertTrue(
all(isinstance(item, OaiHarvesterMetadataFormat) for item in data)
)
self.assertEqual(status_code, status.HTTP_200_OK)
class TestListSetsAsObject(TestCase):
"""Test List Sets As Object"""
@patch.object(
oai_verbs_api.transform_operations,
"transform_dict_set_to_oai_harvester_set",
)
@patch.object(oai_verbs_api, "list_sets")
def test_list_sets_as_object_return_object_and_ok_status(
self, mock_set, mock_transform
):
"""test_list_sets_as_object_return_object_and_ok_status"""
# Arrange
mock_set.return_value = [], status.HTTP_200_OK
mock_transform.return_value = [OaiHarvesterSet(), OaiHarvesterSet()]
# Act
data, status_code = oai_verbs_api.list_sets_as_object("")
# Assert
self.assertTrue(
all(isinstance(item, OaiHarvesterSet) for item in data)
)
self.assertEqual(status_code, status.HTTP_200_OK)
class TestListRecordsParameter(TestCase):
"""TestListRecordsParameter"""
def setUp(self):
"""setUp"""
super().setUp()
self.url = "http://dummy_url.com"
self.metadata_prefix = "oai_prefix"
self.set = "oai_set"
self.from_ = "2017-04-24T02:00:00Z"
self.until = "2018-04-24T02:00:00Z"
@patch.object(requests, "get")
def test_harvest_params(self, mock_get):
"""test_harvest_params"""
# Arrange
mock_get.return_value.status_code = status.HTTP_200_OK
mock_get.return_value.text = (
OaiPmhMock.mock_oai_response_list_records()
)
expected_params = {
"verb": "ListRecords",
"metadataPrefix": self.metadata_prefix,
"set": self.set,
"from": self.from_,
"until": self.until,
}
# Act
oai_verbs_api.list_records(
url=self.url,
metadata_prefix=self.metadata_prefix,
set_h=self.set,
from_date=self.from_,
until_date=self.until,
)
# Assert
mock_get.assert_called_with(
self.url, expected_params, verify=SSL_CERTIFICATES_DIR
)
@patch.object(requests, "get")
def test_harvest_params_with_resumption_token(self, mock_get):
"""test_harvest_params_with_resumption_token"""
# Arrange
mock_get.return_value.status_code = status.HTTP_200_OK
mock_get.return_value.text = (
OaiPmhMock.mock_oai_response_list_records()
)
resumption_token = "h34fh"
expected_params = {"verb": "ListRecords", "resumptionToken": "h34fh"}
# Act
oai_verbs_api.list_records(
url=self.url,
metadata_prefix=self.metadata_prefix,
set_h=self.set,
from_date=self.from_,
until_date=self.until,
resumption_token=resumption_token,
)
# Asset
mock_get.assert_called_with(
self.url, expected_params, verify=SSL_CERTIFICATES_DIR
)
@patch.object(requests, "get")
def test_harvest_params_returns_error_if_not_200_OK(self, mock_get):
"""test_harvest_params_returns_error_if_not_200_OK"""
# Arrange
error = "An error occurred while trying to get data from the server."
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
mock_get.return_value.status_code = status_code
mock_get.return_value.text = "Error."
# Act
result, resumption_token = oai_verbs_api.list_records(self.url)
# Assert
self.assertEqual(
result.data[oai_pmh_exceptions.OaiPmhMessage.label], error
)
self.assertEqual(result.status_code, status_code)
@patch.object(requests, "get")
def test_harvest_params_returns_error_if_404_not_found(self, mock_get):
"""test_harvest_params_returns_error_if_404_not_found"""
# Arrange
error = "Impossible to get data from the server. Server not found"
status_code = status.HTTP_404_NOT_FOUND
mock_get.return_value.status_code = status_code
mock_get.return_value.text = "Error."
# Act
result, resumption_token = oai_verbs_api.list_records(self.url)
# Assert
self.assertEqual(
result.data[oai_pmh_exceptions.OaiPmhMessage.label], error
)
self.assertEqual(result.status_code, status_code)
@patch.object(requests, "get")
def test_harvest_params_returns_serialized_data_and_resumption_token(
self, mock_get
):
"""test_harvest_params_returns_serialized_data_and_resumption_token"""
# Arrange
mock_get.return_value.status_code = status.HTTP_200_OK
mock_get.return_value.text = (
OaiPmhMock.mock_oai_response_list_records()
)
resumption_token = "h34fh"
# Act
result, resumption_token = oai_verbs_api.list_records(
url=self.url,
metadata_prefix=self.metadata_prefix,
set_h=self.set,
from_date=self.from_,
until_date=self.until,
resumption_token=resumption_token,
)
# Asset
self.assertEqual(result.status_code, status.HTTP_200_OK)
self.assertNotEqual(resumption_token, None)
self.assertTrue(len(result.data), 1)
| UTF-8 | Python | false | false | 7,759 | py | 114 | tests_unit.py | 84 | 0.624952 | 0.614254 | 0 | 240 | 31.329167 | 87 |
YamiAtem/Project-C104 | 3,358,664,442,333 | eb85512c753c9162e18307995cb03d86022a35df | 5157f6cef910ac470903ac14cd571f1deec7e0c7 | /main.py | 164ffa6d7e8c9364048c2aeb40e16f72758e2187 | []
| no_license | https://github.com/YamiAtem/Project-C104 | c47e37f35fb1a531877ec2cf1adf20e6967b812c | 5edd726da3324962f67c5e0987f3262822a3c5ca | refs/heads/main | 2023-07-01T22:14:03.740528 | 2021-08-16T07:29:08 | 2021-08-16T07:29:08 | 366,731,122 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from mean import get_mean
from median import get_median
from mode import get_mode
mean = get_mean("SOCR-HeightWeight.csv")
median = get_median("SOCR-HeightWeight.csv")
mode = get_mode("SOCR-HeightWeight.csv")
print(f"Mean: {mean}")
print(f"Median: {median}")
print(f"Mode: {mode:2f}")
| UTF-8 | Python | false | false | 298 | py | 1 | main.py | 1 | 0.701342 | 0.697987 | 0 | 11 | 25.090909 | 44 |
jwinfield26/Grad_school | 13,752,485,318,759 | b9ab4a707aef1cc8a719406fb684a4af1beb63dd | 98db0da79a1acba389dd4ae62ad28fa53a875cc8 | /Python_Programming/Assignment2/assignment2_part4.py | c82453f8fa1c9d62a10303e22bc94b6014c463db | []
| no_license | https://github.com/jwinfield26/Grad_school | b8603987f7c06d31bbf088681968de40a99c1100 | 926e0ce2d5bfd9b77e89fc96ad809ec7dffee7a3 | refs/heads/master | 2021-02-20T12:33:40.334083 | 2020-08-12T04:02:24 | 2020-08-12T04:02:24 | 245,336,366 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##################################################################################
# Justin Winfield
# Monday, October 8th
# I have not given or received any unauthorized assistance on this assignment
##################################################################################
def throwPairDice():
"Throwing a pair of dice"
import random
throw = random.randrange(1,7) + random.randrange(1,7) # Simulates rolling of a pair of dice
return throw
def throwThirdDice():
"Throwing an extra dice if User wants to double their bet if they lost on the first throw"
import random
thirdThrow = random.randrange(1,7) # Simulates the third dice
return thirdThrow
# Main Game
account = 100
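# Rules implemented below: roll a pair of dice; a total of 7 or 12 pays out twice the bet.
# Otherwise the player may roll one extra die for double the bet; if the combined total of
# all three dice is 7 or 12 the payout is triple the doubled bet, else the doubled bet is lost.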
while True:
bet = eval(input("Place your bet: "))
if bet <= account and bet > 0:
firstRoll = throwPairDice()
if firstRoll in (7,12): # Check to see if the User rolls a 7 or a 12
winnings = 2*bet
newAccount = account + winnings # Double the user's total bet
print("You rolled a {}. You Win! Wininngs: {}, New Account Balance: {}".format(firstRoll, winnings, newAccount))
account = newAccount
else:
print("You rolled a {}".format(firstRoll))
retry = eval(input("Want to try again? [1] - Yes, [2] - No: "))
if retry == 2:
newAccount = account - bet
print("You rolled a {}. You Lose! New Account Balance: {}".format(firstRoll, newAccount))
account = newAccount
if retry == 1:
doubleBet = bet * 2
if doubleBet >= account: # Don't have enough money to play
newAccount = account - bet
print("You do not have the sufficient funds to play. You lose! New Account Balance: {}".format(newAccount))
account = newAccount
else:
newRoll = throwThirdDice()
combined = firstRoll + newRoll
if combined in (7,12): # Triple their total bet
winnings = 3 * doubleBet
newAccount = account + winnings
print("You rolled a {}. You Win! Wininngs: {}, New Account Balance: {}".format(combined, winnings, newAccount))
account = newAccount
else:
newAccount = account - doubleBet
print("You rolled a {}. You Lose! New Account Balance: {}".format(combined, newAccount))
account = newAccount
    else: # the bet is invalid: not positive, or more than the user can cover
        newAccount = account # no money changes hands on an invalid bet
        print("Invalid bet. You must wager between 1 and your balance of {}.".format(account))
account = newAccount | UTF-8 | Python | false | false | 2,800 | py | 32 | assignment2_part4.py | 13 | 0.533214 | 0.523571 | 0 | 61 | 44.918033 | 135 |
student-work-agu-gis2021/lesson6-dataprocessing-with-pandas-YoshitoKonno | 2,147,483,695,224 | 65e03295cc23e9c6fa73328ed2cca1abe487cc57 | a4b64f30588158d9a90c5c83d47cedb3f9345a2c | /Exercise_6_Problems_1_2.py | 9f15d363895175bda5e33eb9cfc2e41f5aed6f0e | []
| no_license | https://github.com/student-work-agu-gis2021/lesson6-dataprocessing-with-pandas-YoshitoKonno | 35eef994cef339ad2bb906eca606dc62cf20896f | fbe72f38823d9666f78d9847ba7a67a5b58fabe9 | refs/heads/master | 2023-05-01T16:27:10.762446 | 2021-05-23T13:34:51 | 2021-05-23T13:34:51 | 368,017,758 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Exercise 6: Weather data calculation
#
# ### Part 1
#
# You should start by reading the data file.
#
# - Read the data file into variable the variable `data`
# - Skip the second row
# - Convert the no-data values (`-9999`) into `NaN`
import pandas as pd
import numpy as np
data = None
# YOUR CODE HERE 1
# Read the data file. skip the second row and convert the no-data values into NaN.
data=pd.read_csv('data/1091402.txt',skiprows=[1],delim_whitespace=True,na_values=-9999)
# ### Part 2
#
# In this section, you will calculate simple statistics based on the input data:
#
# - Calculate how many no-data (NaN) values there are in the `TAVG` column
# - Assign your answer to a variable called `tavg_nodata_count`.
tavg_nodata_count = None
#YOUR CODE HERE 2
#variable 'a' means all columns of 'TAVG'
#tavg_nodata_count means count of nodata of 'TAVG'.
a=data['TAVG']
tavg_nodata_count=a.isnull().sum()
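#note: this works because the -9999 no-data values were converted to NaN when reading the file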
#for i in range(tavg.length()):
#if tavg[i]==-9999:
# tavg_nodata_count=tavg_nodata_count+1
#CAUTION!!! DON'T EDIT THIS PART START
# Print out the solution:
print('Number of no-data values in column "TAVG":',tavg_nodata_count)
#CAUTION!!! DON'T EDIT THIS PART END
# - Calculate how many no-data (NaN) values there are for the `TMIN` column
# - Assign your answer into a variable called `tmin_nodata_count`
tmin_nodata_count = None
#YOUR CODE HERE 3
#variable 'b' means all columns of 'TMIN'
#tmin_nodata_count means count of nodata of 'TMIN'.
b=data['TMIN']
tmin_nodata_count=b.isnull().sum()
#CAUTION!!! DON'T EDIT THIS PART START
# Print out the solution:
print('Number of no-data values in column "TMIN":', tmin_nodata_count)
#CAUTION!!! DON'T EDIT THIS PART END
# - Calculate the total number of days covered by this data file
# - Assign your answer into a variable called day_count
day_count = None
#YOUR CODE HERE 4
#variable 'c' means all columns of 'DATE'
#day_count means count of date
c=data['DATE']
day_count=c.count()
#CAUTION!!! DON'T EDIT THIS PART START
# Print out the solution:
print("Number of days:", day_count)
#CAUTION!!! DON'T EDIT THIS PART END
# - Find the date of the oldest (first) observation
# - Assign your answer into a variable called `first_obs`
first_obs = None
# YOUR CODE HERE 5
#first_obs means first columns of 'DATE'
first_obs = data['DATE'][0]
#CAUTION!!! DON'T EDIT THIS PART START
# Print out the solution:
print('Date of the first observation:',first_obs)
#CAUTION!!! DON'T EDIT THIS PART END
# - Find the date of the most recent (last) observation
# - Assign your answer into a variable called `last_obs`
last_obs = None
# YOUR CODE HERE 6
#last_obs means last columns of 'DATE'
last_obs = data['DATE'][len(data['DATE'])-1]
#CAUTION!!! DON'T EDIT THIS PART START
# Print out the solution:
print('Date of the last observation:', last_obs)
#CAUTION!!! DON'T EDIT THIS PART END
# - Find the average temperature for the whole data file (all observtions) from column `TAVG`
# - Assign your answer into a variable called `avg_temp`
avg_temp = None
# YOUR CODE HERE 7
sum_temp=0#sum_temp means total of temperature but it doesn't include nodata
for i in range(len(data['TAVG'])):
    #if "data['TAVG'][i]" is not nodata, we add "data['TAVG'][i]" to sum_temp
    if(np.isnan(data['TAVG'][i])==False):
        sum_temp+=data['TAVG'][i]
#avg_temp means average temperature for the whole data file from column 'TAVG'
avg_temp=sum_temp/(len(data['TAVG'])-tavg_nodata_count)
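#note: pandas gives the same result directly with data['TAVG'].mean(), since mean() skips NaN by default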
#CAUTION!!! DON'T EDIT THIS PART START
# Print out the solution:
print('Average temperature (F) for the whole dataset:', round(avg_temp, 2))
#CAUTION!!! DON'T EDIT THIS PART END
# - Find the average `TMAX` temperature over the Summer of 1969 (months May, June, July, and August of the year 1969)
# - Assign your answer into a variable called `avg_temp_1969`
avg_temp_1969 = None
# YOUR CODE HERE 8
sum_temp_1969=0  # total of 'TMAX' over 1969-05-01..1969-08-31, excluding no-data rows
sum_temp_1969_count=0  # number of valid observations in that window
for i in range(len(data['TMAX'])):
    if(np.isnan(data['TMAX'][i])==False)and(data['DATE'][i]>=19690501)and(data['DATE'][i]<=19690831):
        sum_temp_1969+=data['TMAX'][i]
        sum_temp_1969_count+=1
#avg_temp_1969 is the average 'TMAX' over the Summer of 1969 (May-August)
avg_temp_1969=sum_temp_1969/sum_temp_1969_count
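# equivalently with pandas, since DATE is an integer in YYYYMMDD form:
# avg_temp_1969 = data.loc[(data['DATE'] >= 19690501) & (data['DATE'] <= 19690831), 'TMAX'].mean()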
#CAUTION!!! DON'T EDIT THIS PART START
# This test print should print a number
print('Average temperature (F) for the Summer of 69:', round(avg_temp_1969, 2))
#CAUTION!!! DON'T EDIT THIS PART END
# ## Problem 2 - Calculating monthly average temperatures (*3 points*)
#
monthly_data = None
# YOUR CODE HERE 9
month=[]  # month[i] is the month of data['DATE'][i]
celsius_temp=[]  # celsius_temp[i] is data['TAVG'][i] converted from Fahrenheit to Celsius
total_temp=0  # running temperature total for the current month
day_count=0  # number of days accumulated for the current month
month_ave=[]  # one "total_temp/day_count" average per month
for i in range(len(data['DATE'])):
    # DATE is an integer in YYYYMMDD form, so DATE % 10000 / 100 yields the month
    month.append(int(data['DATE'][i]%10000/100))
    celsius_temp.append((data['TAVG'][i]-32.0)/1.8)
for i in range(len(data['DATE'])):
if(i==0):#in case, i=0
total_temp+=celsius_temp[i]
day_count+=1
else:#in case, i≠0
if(month[i-1]!=month[i]):
month_ave.append(total_temp/day_count)
total_temp=celsius_temp[i]
day_count=1 #initialization of day_count
else:
total_temp+=celsius_temp[i]
day_count+=1
month_ave.append(total_temp/day_count)
monthly_data=pd.DataFrame({'temp_celsius':month_ave})
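# a compact pandas alternative (DATE // 100 gives YYYYMM); note that
# groupby().mean() skips NaNs where the loop above propagates them:
# monthly_data = pd.DataFrame(
#     {'temp_celsius': ((data.groupby(data['DATE'] // 100)['TAVG'].mean() - 32.0) / 1.8).values})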
#CAUTION!!! DON'T EDIT THIS PART START
# This test print should print the length of variable monthly_data
print(len(monthly_data))
# This test print should print the column names of monthly_data
print(monthly_data.columns.values)
# This test print should print the mean of temp_celsius
print(round(monthly_data['temp_celsius'].mean(),2))
# This test print should print the median of temp_celsius
print(round(monthly_data['temp_celsius'].median(), 2))
#CAUTION!!! DON'T EDIT THIS PART END
def func1():
return tavg_nodata_count
def func2():
return tmin_nodata_count
def func3():
return day_count
def func4():
return first_obs
def func5():
return last_obs
def func6():
return round(avg_temp,2)
def func7():
return round(avg_temp_1969,2)
def func8():
return len(monthly_data)
def func9():
return monthly_data.columns.values
def func10():
return round(monthly_data['temp_celsius'].mean(),2)
def func11():
return round(monthly_data['temp_celsius'].median(),2) | UTF-8 | Python | false | false | 6,663 | py | 1 | Exercise_6_Problems_1_2.py | 1 | 0.697193 | 0.668668 | 0 | 237 | 27.109705 | 117 |
willzhang05/ion | 6,313,601,959,111 | 6711e644c362bc1e568cacd22e39abbe6c70a7d9 | 87fa69391bcc22ba6d9839fc8ddad98ed2fbbdf0 | /intranet/apps/announcements/api.py | 2306e2c0805a98a66162ecfa85620c3829404f46 | []
| no_license | https://github.com/willzhang05/ion | b1945209266099a130d4dd89b6b6b08e625ec00b | f12c49dad6bc0eaff85d4a49bd99a73a1e486378 | refs/heads/master | 2021-01-21T15:57:02.667770 | 2015-11-19T18:36:04 | 2015-11-19T18:36:04 | 46,525,786 | 0 | 0 | null | true | 2015-11-19T23:03:41 | 2015-11-19T23:03:41 | 2015-11-17T17:25:53 | 2015-11-19T21:20:10 | 24,140 | 0 | 0 | 0 | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from rest_framework import generics, permissions
from .models import Announcement
from .serializers import AnnouncementSerializer
logger = logging.getLogger(__name__)
class IsAnnouncementAdminOrReadOnly(permissions.BasePermission):
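    # read access for any authenticated user; write methods additionally
    # require the announcements-admin flag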
def has_permission(self, request, view):
return (request.user and request.user.is_authenticated() and
(request.method in permissions.SAFE_METHODS or
request.user.is_announcements_admin))
class ListCreateAnnouncement(generics.ListCreateAPIView):
serializer_class = AnnouncementSerializer
permission_classes = (IsAnnouncementAdminOrReadOnly,)
def get_queryset(self):
user = self.request.user
return Announcement.objects.visible_to_user(user).prefetch_related("groups")
class RetrieveUpdateDestroyAnnouncement(generics.RetrieveUpdateDestroyAPIView):
serializer_class = AnnouncementSerializer
permission_classes = (IsAnnouncementAdminOrReadOnly,)
def get_queryset(self):
user = self.request.user
return Announcement.objects.visible_to_user(user).prefetch_related("groups")
| UTF-8 | Python | false | false | 1,192 | py | 278 | api.py | 166 | 0.75 | 0.749161 | 0 | 35 | 33.057143 | 84 |
geyunxiang/mmdps | 4,269,197,514,948 | 5e65bd13f11f12afb18b1fd80204d14c5f0e9ba3 | d27d2e68336b681b07620c574ec562b7dd236d30 | /tools/ui_programs/ui_runpara.py | d0cdfd4e9a06259aaecd7bcedcf5f1a6d77c37fa | []
| no_license | https://github.com/geyunxiang/mmdps | de241bd607bc8230ade83fa75e4890f0d84747d6 | dabfabdeb2f922a3dcbdaf3fc46f0c4b40598279 | refs/heads/master | 2023-06-01T04:46:05.388686 | 2021-08-26T02:49:53 | 2021-08-26T02:49:53 | 117,055,236 | 5 | 3 | null | false | 2023-03-10T09:54:02 | 2018-01-11T05:38:13 | 2022-07-12T06:32:29 | 2022-05-06T06:16:49 | 22,039 | 4 | 2 | 1 | Python | false | false | import os
import tkinter as tk
from mmdps.gui import guiframe, tktools, field
from mmdps.util.loadsave import load_json_ordered
from mmdps.proc import para
class RunParaApplication(guiframe.MainWindow):
def __init__(self, master=None, **kw):
guiframe.MainWindow.__init__(self, master, **kw)
self.build_actions()
self.build_widgets()
def build_actions(self):
pass
def build_widgets(self):
d = {'typename': 'FileEditField', 'name': 'ParaConfig', 'value': ''}
self.fpara = field.create(d)
d = {'typename': 'StringField', 'name': 'ProjectPathVar', 'value': '.'}
self.fpath = field.create(d)
wpara = self.fpara.build_widget(self.mainframe)
wpath = self.fpath.build_widget(self.mainframe)
wrun = tktools.button(self.mainframe, 'Run', self.cb_run)
wpara.pack()
wpath.pack()
wrun.pack()
def cb_run(self):
paraconf = self.fpara.value
pathconf = self.fpath.value
os.environ['MMDPS_PROJECTPATH'] = pathconf
j = para.load(load_json_ordered(paraconf))
j.run()
if __name__ == '__main__':
root = tk.Tk()
root.geometry('800x600')
app = RunParaApplication(root)
app.pack(fill='both', expand=True)
root.title('MMDPS RunPara')
root.mainloop()
| UTF-8 | Python | false | false | 1,178 | py | 315 | ui_runpara.py | 154 | 0.691851 | 0.686757 | 0 | 42 | 27.047619 | 73 |
alanpropp/compciv-2016 | 18,803,366,845,857 | b6c03300abd431c541a22e975d87f8aca49e931a | b962804dec069c6076650efe5e609ca4da97651e | /exercises/0020-gender-detector/i.py | a53c892434af28fcca43729541ebfacb3c2e9174 | []
| no_license | https://github.com/alanpropp/compciv-2016 | 8accbcbe31ae76a0e9a401c862f68fad8c23bb57 | 355351d1748a0d296a9d2a5aaaf399ee7488e8ff | refs/heads/master | 2021-01-10T14:58:17.950577 | 2016-03-15T23:35:05 | 2016-03-15T23:35:05 | 49,608,092 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from os.path import join, basename
import csv
YEAR = 2014
DATA_DIR = 'tempdata'
filename = join(DATA_DIR, 'wrangled2014.csv')
finalfile = open(filename, 'r')
count = 0
bigdatarow = []
reader = csv.reader(finalfile)
next(reader, None)
#Go through CSV file
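# keep names with at least 100 total births; judging from the indices used
# below, column 6 holds the birth count and column 3 the gender ratio (%)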
for line in reader:
if int(line[6]) >= 100 :
bigdatarow.append(line)
ratios = [60, 70, 80, 90, 99]
total = len(bigdatarow)
print("Popular names in 2014 with gender ratio less than or equal to:")
for ratio in ratios:
babies = 0
for line in bigdatarow:
if int(line[3]) <= ratio:
babies += 1
print(" " + str(ratio) + "%: " + str(babies) + "/" + str(total)) | UTF-8 | Python | false | false | 625 | py | 58 | i.py | 48 | 0.6672 | 0.6192 | 0 | 28 | 21.357143 | 71 |
jijiaxing/python_basic | 3,573,412,807,782 | 55493191ae6358bb95b8adc0a17796abcb6dd6fa | 9ec5861148b0db44d50170e26072782f1d9946ff | /经典算法50题python版/13。完全平方.py | 97d3fd9d6a5511b8446b8e4beeaacaacd8fa6d30 | []
| no_license | https://github.com/jijiaxing/python_basic | 771be5223cba0fbd5f3e5c45411a23cf871cad12 | 140533152852e23fb62c159eb83dbe05a5123f02 | refs/heads/master | 2021-08-18T16:25:36.693046 | 2017-11-23T07:33:15 | 2017-11-23T07:33:15 | 109,068,429 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #一个整数,它加上100后是一个完全平方数,加上168又是一个完全平方数,
# 请问该数是多少?
import math
for num in range(10000):
a = math.sqrt(num + 100)
b = math.sqrt(num + 100 + 168)
    if a // 1 == a and b // 1 == b: #check that both square roots are whole numbers
print(num)
| UTF-8 | Python | false | false | 312 | py | 104 | 13。完全平方.py | 89 | 0.574661 | 0.475113 | 0 | 10 | 21 | 46 |
mikbuch/ssvep_ica | 19,610,820,685,506 | 41dc5fe6a3c70648986a0677911b54670169e849 | 2eefb34907fd853d3068ca49b8cb65dc60cdf325 | /dataset_MNE_compatible.py | f65bd391b174048bbd258f55e339ec55c894ca2c | []
| no_license | https://github.com/mikbuch/ssvep_ica | 65c50e99468acf86e00c3e1007457818fd13814d | 68a5ba858cb6d2816c2bacfc53facd5fd5a23d53 | refs/heads/master | 2020-06-14T04:55:49.371287 | 2017-02-13T13:33:25 | 2017-02-13T13:33:25 | 75,231,895 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scipy.io as sio
import numpy as np
from sklearn import preprocessing
import mne
def load_matlab_data(filepath, var_name=None):
data = sio.loadmat(filepath)
if var_name is not None:
data = data[var_name]
return data
def load_coords(filepath, delimiter=',', scale_0_1=True):
coords = np.loadtxt(filepath, delimiter=delimiter)
if scale_0_1:
coords = preprocessing.MinMaxScaler().fit_transform(coords)
return coords
def create_MNE_Raw(data_filepath, var_name, kind,
sfreq, delimiter_data=',', dbg=False):
"""
Based on: http://stackoverflow.com/a/38634620
"""
# Read the CSV file as a NumPy array.
data = load_matlab_data(data_filepath, var_name=var_name)
# Sampling rate of the machine [Hz].
sfreq = sfreq
# Channel types. From documentation:
'''
ch_types : list of str | str
Channel types. If None, data are assumed to be misc.
Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc',
'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'hbr' or 'hbo'.
If str, then all channels are assumed to be of the same type.
'''
ch_types = 'eeg'
montage = mne.channels.read_montage(kind)
# Create the info structure needed by MNE
info = mne.create_info(montage.ch_names, sfreq, ch_types, montage)
# Read montage.
# 3D montage ==> 2D montage
# https://gist.github.com/wmvanvliet/6d7c78ea329d4e9e1217
# info = mne.create_info(ch_names, sfreq, ch_types, montage)
layout = mne.channels.make_eeg_layout(info)
layout.pos = layout.pos[:-3]
# Update pos to 2D scheme.
montage.pos = layout.pos
# Remove last 3 electrodes.
montage.ch_names = montage.ch_names[:-3]
info = mne.create_info(montage.ch_names, sfreq, ch_types, montage)
# Finally, create the Raw object
raw = mne.io.RawArray(data, info)
if dbg:
# Plot it.
raw.plot()
return raw, layout
| UTF-8 | Python | false | false | 1,985 | py | 13 | dataset_MNE_compatible.py | 9 | 0.635768 | 0.620151 | 0 | 68 | 28.191176 | 75 |
varlociraptor/simulate-variants | 14,611,478,750,532 | f17ebf9163faeb67b0c107174bb8e638ee744739 | 614d8ce6d306e04d92178ec4b28abde2d4ec4233 | /workflow/Snakefile | 98bbe8ef0262ba6f83f30dbb9a3dbfcb047f217d | []
| no_license | https://github.com/varlociraptor/simulate-variants | 48011d6972a8ad86d6ecf95b4a984162d1156e96 | f4942e21582fd53beb689b07063f688c20182c46 | refs/heads/master | 2022-11-18T16:46:41.303102 | 2020-07-08T22:23:03 | 2020-07-08T22:23:03 | 278,143,819 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | rule all:
input:
expand("results/mapped/{depth}x.sorted.bam", depth=[5, 10, 20, 30, 40])
rule get_genome:
output:
"resources/genome.fasta"
params:
species="saccharomyces_cerevisiae",
datatype="dna",
build="R64-1-1",
release="100"
log:
"logs/get-genome.log"
cache: True # save space and time with between workflow caching (see docs)
wrapper:
"0.63.0/bio/reference/ensembl-sequence"
rule constrain_genome:
input:
fa="resources/genome.fasta",
fai="resources/genome.fasta.fai"
output:
"resources/chrI.genome.fasta"
log:
"logs/constrain-genome.log"
conda:
"envs/samtools.yaml"
shell:
"samtools faidx {input.fa} I > {output} 2> {log}"
rule mason_variator:
input:
fa="resources/chrI.genome.fasta",
fai="resources/chrI.genome.fasta.fai"
output:
"results/simulated.variants.vcf",
conda:
"envs/mason.yaml"
log:
"logs/mason/variator.log"
shell:
"mason_variator --sv-inversion-rate 0.0001 --sv-duplication-rate 0.0001 -ir {input.fa} -ov {output} 2> {log}"
rule mason_simulator:
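    # n_reads below scales with the requested depth over the 230,218 bp
    # chromosome I of S. cerevisiae (R64-1-1)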
input:
ref="resources/chrI.genome.fasta",
fai="resources/chrI.genome.fasta.fai",
vars="results/simulated.variants.vcf"
output:
"results/simulated.{depth}x.reads.1.fq.gz",
"results/simulated.{depth}x.reads.2.fq.gz"
params:
n_reads=lambda w: 230218 * int(w.depth)
conda:
"envs/mason.yaml"
log:
"logs/mason/simulator.{depth}x.log"
shell:
"mason_simulator -ir {input.ref} -n {params.n_reads} -iv {input.vars} -o {output[0]} -or {output[1]} 2> {log}"
rule fasta_index:
input:
"{prefix}.fasta"
output:
"{prefix}.fasta.fai"
wrapper:
"0.63.0/bio/samtools/faidx"
rule bwa_index:
input:
"resources/chrI.genome.fasta"
output:
multiext("resources/chrI.genome.fasta", ".amb", ".ann", ".bwt", ".pac", ".sa")
log:
"logs/bwa_index.log"
resources:
mem_mb=369000
wrapper:
"0.59.2/bio/bwa/index"
rule map_reads:
input:
reads=[
"results/simulated.{depth}x.reads.1.fq.gz",
"results/simulated.{depth}x.reads.2.fq.gz"
],
idx=rules.bwa_index.output,
output:
"results/mapped/{depth}x.sorted.bam"
log:
"logs/bwa_mem/{depth}.log"
params:
index=lambda w, input: os.path.splitext(input.idx[0])[0],
sort="samtools",
sort_order="coordinate"
threads: 8
wrapper:
"0.56.0/bio/bwa/mem"
rule samtools_index:
input:
"results/mapped/{depth}x.sorted.bam"
output:
"results/mapped/{depth}x.sorted.bam.bai"
wrapper:
"0.63.0/bio/samtools/index"
rule varlociraptor_preprocess:
input:
vcf="results/simulated.variants.vcf",
fa="resources/chrI.genome.fasta",
fai="resources/chrI.genome.fasta.fai",
bam="results/mapped/{depth}x.sorted.bam",
bai="results/mapped/{depth}x.sorted.bam.bai"
output:
"results/varlociraptor/preprocess/{depth}x.bcf"
log:
"logs/varlociraptor/preprocess/{depth}x.log"
shell:
"varlociraptor preprocess variants {input.fa} --candidates {input.vcf} --bam {input.bam} --output {output} 2> {log}"
rule varlociraptor_testcase:
input:
obs="results/varlociraptor/preprocess/{depth}x.bcf",
scenario="resources/scenario.yaml"
output:
directory("resources/testcases/{chrom}:{pos}.{depth}x")
log:
"logs/varlociraptor/testcase/{chrom}:{pos}.{depth}x.log"
shell:
"varlociraptor call variants "
"--testcase-locus {wildcards.chrom}:{wildcards.pos} "
"--testcase-prefix {output} "
"generic "
"--obs simulated={input.obs} "
"--scenario {input.scenario} "
"2> {log}"
| UTF-8 | Python | false | false | 3,974 | 2 | Snakefile | 1 | 0.588072 | 0.569955 | 0 | 147 | 26.034014 | 124 |
|
DaVinci42/LeetCode | 6,004,364,303,935 | 3ecc309786a9be6c264a6a41cd98ba3d30e9dcc7 | a0a3749c294de58cfb5f6de21061ba1ee95b08cf | /837.New21Game.py | d269765ca3e266943df287a25317b03df4976853 | []
| no_license | https://github.com/DaVinci42/LeetCode | 7f20ead30f2626f24e7afdf443dd4e35b331a42e | 5b4cea4bd91de3022bf90eb8e3b3b40e08729ed9 | refs/heads/master | 2021-07-16T17:28:23.954378 | 2021-03-19T04:45:41 | 2021-03-19T04:45:41 | 99,877,612 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def new21Game(self, N: int, K: int, W: int) -> float:
        if K <= 0 or N >= K + W - 1:
            # every reachable total is at most K - 1 + W, so success is certain
            return 1
dp, preSum = [1] * (N + 1), 1
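        # dp[i] = probability that the running total ever equals i; while the
        # total is below K we draw uniformly from 1..W, so dp[i] averages the
        # dp values in the window [i-W, i-1] whose totals are still < K.
        # preSum maintains that sliding-window sum in O(1) per step.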
for i in range(1, N + 1):
dp[i] = 1 / W * preSum
if i < K:
preSum += dp[i]
if i >= W:
preSum -= dp[i - W]
return sum(dp[K : N + 1]) | UTF-8 | Python | false | false | 369 | py | 147 | 837.New21Game.py | 146 | 0.365854 | 0.336043 | 0 | 14 | 25.428571 | 57 |
kha-dinh/life-scheduler | 7,395,933,696,583 | a241ae537cc0cadb8f190977f6c1da3c03d7ed6f | 43b5894b2a4e7efd93161a20ce6a5a916d0d0f84 | /life_scheduler/__init__.py | 2070722a43fb177da29f75c0af737fc9bb0bee09 | [
"BSD-2-Clause"
]
| permissive | https://github.com/kha-dinh/life-scheduler | 64e910a99884d73cfac1a3f418d2fcf5f105b0ee | 1ea9c05ba7f9cf128c708533b18f2ab4e2d3ec17 | refs/heads/main | 2023-06-29T23:49:30.824823 | 2021-08-08T06:22:29 | 2021-08-08T06:22:29 | 391,521,651 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .algorithms import Task, TaskList | UTF-8 | Python | false | false | 38 | py | 4 | __init__.py | 3 | 0.842105 | 0.842105 | 0 | 1 | 38 | 38 |
LJHG/weirdWebsite | 19,490,561,629,267 | b4ee0d14296252c09426892001e4a1c7fe3959f8 | fdfc5914337f04168675fed5aefd5dd37f1ec650 | /app01/views.py | 9d5c4fd3d9afaff6af4ead9b145de0c64b5a31fb | []
| no_license | https://github.com/LJHG/weirdWebsite | 9c02d112431145e754946c33b1ad5623f9985332 | c10c5c137158cd9f474da396f6872b0f15c789e9 | refs/heads/master | 2023-07-16T01:29:36.461565 | 2021-08-15T08:57:54 | 2021-08-15T08:57:54 | 223,544,056 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.shortcuts import HttpResponse
import json
import numpy as np
import pandas as pd
import os
# pieces = []
# for year in range(1880,2011):
#     frame = pd.read_csv('../../babynames/yob%d.txt'%year,names=['name','gender','frequency']) #faster than loadtxt plus a DataFrame conversion
# frame['year']=year
# #print(frame)
# #print(frame['frequency'].sum())
# pieces.append(frame)
# #print(pieces)
# names = pd.concat(pieces,ignore_index=True)#ignore_index=True reindexes the rows
names = None
data = None
users_info = None
user_item = None
def read_data():
global names
pieces = []
for year in range(1880,2011):
        frame = pd.read_csv('app01/static/babynames/yob%d.txt'%year,names=['name','gender','frequency']) #note: this path is relative to manage.py
frame['year']=year
#print(frame)
#print(frame['frequency'].sum())
pieces.append(frame)
    names = pd.concat(pieces,ignore_index=True)#ignore_index=True reindexes the rows
return
def get_births(name,year):
    #backs goto_b1
    tmpframe = names[(names['name']==name) & (names['year']==year)] #filter rows matching the name and year
    ans = tmpframe['frequency'].sum() #sum the frequency column (.sum needs parentheses)
return(ans)
def get_births_years(name,start_year,end_year):
    #given a name, a start year and an end year, return the yearly birth counts for that name (used to plot a line chart)
    frame = names[(names['name']==name) & (names['year']>=start_year) & (names['year']<=end_year)]
df=frame.groupby('year').sum()
return df.index.values.tolist(),df.values.flatten().tolist()
def multi_person_births(name,start_year,end_year):
namesum=[]
for item in name:
        frame = names[(names['name']==item) & (names['year']>=start_year) & (names['year']<=end_year)]
df=frame.groupby('year').sum()
namesum.append(df['frequency'].sum())
return namesum
def survivor(name,start_year,end_year,lifespan):
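    # estimate how many people named `name` are alive in each year by counting
    # everyone born within the preceding `lifespan` years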
alive = []
    tmpframe = names[(names['name']==name) & (names['year']>=(start_year-lifespan)) & (names['year']<=end_year)]#filter the needed rows first
year=tmpframe.groupby('year').sum()
for i in range(start_year,end_year+1):
#print(i)
if(i<1880):
ans =0
elif(i>=1880 and (i-lifespan) <1880):
ans = year.loc[1880:i]['frequency'].sum()
else:
ans = year.loc[i-lifespan:i]['frequency'].sum()
alive.append(ans)
year = np.linspace(start_year,end_year,end_year-start_year+1)
return year.tolist(),alive
def Correlation(namea,nameb,start_year,end_year):
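    # Pearson correlation between the two names' yearly birth counts;
    # corrwith aligns the two series on their shared year index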
tmpa = names[(names['name']==namea) & (names['year']>=start_year) & (names['year']<=end_year)]
a=tmpa.groupby('year').sum()
tmpb = names[(names['name']==nameb) & (names['year']>=start_year) & (names['year']<=end_year)]
b=tmpb.groupby('year').sum()
correlation = a.corrwith(b).values[0]
year = a.index.values.tolist()
return a.values.flatten().tolist(),b.values.flatten().tolist(),correlation,year
def births_oneyear_sort(year):
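    # total births per name for one year (a name may appear once per gender),
    # sorted in descending order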
tmp = names[ (names['year']==year)]
dict ={}
for index, row in tmp.iterrows():
if(row['name'] in dict):
dict[row['name']]= dict[row['name']] + row['frequency']
else:
dict[row['name']]= row['frequency']
ans = pd.DataFrame.from_dict(dict, orient='index', columns=['values'])
ans = ans.sort_values(by = 'values',ascending = False)
return ans.index.tolist(),ans.values.flatten().tolist()
def most_5(year):
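    # top-5 male and female names for `year`; assumes rows within each
    # (year, gender) group are already sorted by frequency, as in the raw files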
males=[]
females=[]
tmp = names.groupby(['year','gender'])
for index,group in tmp:
if(group['gender'].iloc[0] == 'M'):
males.append(group.values[:5])
else:
females.append(group.values[:5])
names_list_m = []
num_list_m =[]
names_list_f = []
num_list_f =[]
for item in males[year-1880]:
names_list_m.append(item[0])
num_list_m.append(item[2])
for item in females[year-1880]:
names_list_f.append(item[0])
num_list_f.append(item[2])
return names_list_m,num_list_m,names_list_f,num_list_f
# ---- movie recommendation helpers ----
def read_data_movie():
    #load the MovieLens 1M data
global user_item
global data
global users_info
rnames = ['user_id', 'movie_id', 'rating', 'timestamp']
ratings = pd.read_table('app01/static/ml_1m/ratings.dat', sep='::', header=None, names=rnames,usecols = [0,1,2],engine='python')
mnames = ['movie_id', 'title', 'genres']
movies = pd.read_table('app01/static/ml_1m/movies.dat', sep='::', header=None, names=mnames,usecols = [0,1],engine='python')
data = pd.merge(ratings,movies)
users_info = pd.read_table('app01/static/ml_1m/users.dat', sep='::', header=None, names=['user_id','gender','age','occupation'],usecols = [0,1,2,3],engine='python')
#print(data)
#print(users_info)
    # pivot the ratings into a User-Item matrix
user_item = ratings.pivot(index='user_id', columns='movie_id', values='rating')
def common_score(user1,user2):
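    # movies rated by both users, as {movie title: [user1 score, user2 score]}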
dict={}
bool_array = user_item.loc[user1].notnull() & user_item.loc[user2].notnull()
vector1 = user_item.loc[user1,bool_array]
vector2 = user_item.loc[user2,bool_array]
for i,v in vector1.items():
score=[]
score.append(user_item.loc[user1][i])
score.append(user_item.loc[user2][i])
        #look up the movie title in data
moviename = data[data['movie_id']==i]['title'].iloc[0]
dict[moviename] = score
return dict
def cal_simi(user1,user2):
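    # cosine similarity over co-rated movies:
    # s = sum(r1 * r2) / sqrt(sum(r1^2) * sum(r2^2)), or 0 with no overlap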
bool_array = user_item.loc[user1].notnull() & user_item.loc[user2].notnull() # scoi
vector1 = user_item.loc[user1,bool_array]
vector2 = user_item.loc[user2,bool_array]
denominator = (sum(vector1*vector1)*sum(vector2*vector2))**0.5
if(denominator == 0):
return 0
s = sum(vector1*vector2) / denominator
return s
def most_similar_5(user):
dict={}
for i in user_item.index:
if(i == user):
continue
dict[i] = cal_simi(user,i)
    #sort users by similarity, descending
dict = sorted(dict.items(), key=lambda x: x[1],reverse = True)
ans = dict[0:5]
#ansdict user_id:[id,gender,age,job]
ansdict={}
for item in ans:
info=[]
u = users_info[users_info['user_id']==item[0]].values[0]
ansdict[item[0]] = u
    #the raw integers in ansdict (gender/age/occupation fields) are left undocumented here
return ansdict
def recommend_movies(user):
    #read the most-popular-movies list and iterate it top-down, so the
    #recommendations come out ordered by popularity
polular_movies = pd.read_csv('app01/static/ml_1m/popular_movies.csv')
recommend_users = most_similar_5(user)
recommend_array = user_item.loc[user].notnull()
cur_array = user_item.loc[user].notnull()
for item in recommend_users:
recommend_array = recommend_array | user_item.loc[item].notnull()
    recommend_array = recommend_array & user_item.loc[user].isnull()#drop movies the user has already rated
#print(recommend_array)
recommend_list=[]
for index,row in polular_movies.iterrows():
# l = []
# for item in recommend_users:
# l.append(user_item.loc[item][row['movieid']])
# print(row['movieid'])
# print(l)
if(cur_array[row['movieid']] == False and recommend_array[row['movieid']] == True):
            #recommend this movie
moviename = data[data['movie_id']==row['movieid']]['title'].iloc[0]
recommend_list.append(moviename)
return recommend_list,recommend_users
# ---- Django view functions ----
def gotoindex(request):
global names
if(names is None):
read_data()
print(names)
return render(request,'index.html')
def goto_b1(request):
global names
if(names is None):
read_data()
if(request.POST):
name = request.POST.get('name')
year = int(request.POST.get('year'))
name_list =[]
year_list = []
num_list = []
ans = get_births(name,year)
name_list.append(name)
year_list.append(year)
num_list.append(ans)
print(name_list)
print(year_list)
print(num_list)
return render(request,'babynames_1.html',{'name':name_list,'year':year_list,'num':num_list})
else:
return render(request,'babynames_1.html')
def goto_b2(request):
global names
if(names is None):
read_data()
if(request.POST):
name = request.POST.get('name')
start_year = int(request.POST.get('start_year'))
end_year = int(request.POST.get('end_year'))
name_list =[]
year_list = []
num_list = []
name_list.append(name)
year_list,num_list=get_births_years(name,start_year,end_year)
print(name_list)
print(year_list)
print(num_list)
return render(request,'babynames_2.html',{'name':name_list,'year':year_list,'num':num_list})
else:
print("this is not post!\n")
return render(request,'babynames_2.html')
def goto_b3(request):
global names
if(names is None):
read_data()
if(request.POST):
name = request.POST.get('name')
start_year = int(request.POST.get('start_year'))
end_year = int(request.POST.get('end_year'))
name_list =name.split(' ')
num_list =multi_person_births(name_list,start_year,end_year)
return render(request,'babynames_3.html',{'name':name_list,'start_year':start_year,'end_year':end_year,'num':num_list})
else:
print("this is not post!\n")
return render(request,'babynames_3.html')
def goto_b4(request):
global names
if(names is None):
read_data()
if(request.POST):
name = request.POST.get('name')
start_year = int(request.POST.get('start_year'))
end_year = int(request.POST.get('end_year'))
lifespan = int(request.POST.get('lifespan'))
name_list =[]
year_list = []
num_list = []
name_list.append(name)
year_list,num_list = survivor(name,start_year,end_year,lifespan)
print(type(year_list))
print(type(num_list))
return render(request,'babynames_4.html',{'name':name_list,'year':year_list,'num':num_list})
else:
print("this is not post!\n")
return render(request,'babynames_4.html')
def goto_b5(request):
global names
if(names is None):
read_data()
if(request.POST):
namea = request.POST.get('namea')
nameb = request.POST.get('nameb')
start_year = int(request.POST.get('start_year'))
end_year = int(request.POST.get('end_year'))
namea_list =[]
nameb_list =[]
year_list = []
corre_list =[]
#name_list.append(nameb)
# for i in range(start_year,end_year+1):
# year_list.append(i)
num_lista,num_listb,correlation,year_list= Correlation(namea,nameb,start_year,end_year)
correlation=("%.5f" % correlation)
corre_list.append(correlation)
        #names must be wrapped in lists for the template context
namea_list.append(namea)
nameb_list.append(nameb)
print(len(year_list))
print(len(num_lista))
print(len(num_listb))
return render(request,'babynames_5.html',{'namea':namea_list,'nameb':nameb_list,'year':year_list,'numa':num_lista,'numb':num_listb,'correlation':corre_list})
else:
print("this is not post!\n")
return render(request,'babynames_5.html')
def goto_b6(request):
global names
if(names is None):
read_data()
if(request.POST):
year = int(request.POST.get('year'))
name_list =[]
year_list = []
num_list = []
year_list.append(year)
name_list,num_list = births_oneyear_sort(year)
return render(request,'babynames_6.html',{'name':name_list[0:5],'year':year_list,'num':num_list[0:5]})
else:
return render(request,'babynames_6.html')
def goto_b7(request):
global names
if(names is None):
read_data()
if(request.POST):
year = int(request.POST.get('year'))
year_list = []
name_list_m =[]
num_list_m = []
name_list_f = []
num_list_f = []
year_list.append(year)
name_list_m,num_list_m,name_list_f,num_list_f = most_5(year)
return render(request,'babynames_7.html',{'name_m':name_list_m,'year':year_list,'num_m':num_list_m,'name_f':name_list_f,'num_f':num_list_f})
else:
return render(request,'babynames_7.html')
def goto_movie1(request):
global data
global users_info
global user_item
#if(data == None or users_info == None or user_item == None):
    #reload the movie files on every page visit (slow)
read_data_movie()
if(request.POST):
dic ={}
        # collect the submitted ratings (keyed by MovieLens movie id) into a dict
        movie_ids = [2858, 260, 1196, 1210, 480, 2028, 589, 2571, 1270, 593,
                     1580, 1198, 608, 2762, 110, 2396, 527, 1617, 1265, 1097,
                     318, 858, 356, 296]
        for movie_id in movie_ids:
            value = request.POST.get(str(movie_id))
            if value != 'null' and value is not None:
                dic[movie_id] = int(value)
print(dic)
        #append the new user (id 6041) to the user_item matrix
new = pd.DataFrame(dic,index=[6041])
user_item = user_item.append(new,ignore_index=False)
#print(user_item)
        #the user's ratings are now in user_item; compute the recommendations
r_movies,r_users = recommend_movies(6041)
print(r_movies[0:30])
print(r_users)
r_users_list =[]
for index in r_users:
r_users_list.append(index)
        # co-rated movies between the new user (6041) and each similar user
        common_score_list = []
        for key in r_users:
            common_score_list.append(common_score(6041, key))
        # for each similar user, build a list of co-rated movies with both scores
        users_commonscore = [[], [], [], [], []]
        for idx, cs in enumerate(common_score_list):
            for moviename in cs:
                users_commonscore[idx].append({'moviename': moviename,
                                               'myscore': cs[moviename][0],
                                               'userscore': cs[moviename][1]})
        (user1_commonscore, user2_commonscore, user3_commonscore,
         user4_commonscore, user5_commonscore) = users_commonscore
print(common_score_list)
print(user1_commonscore)
print(user2_commonscore)
print(user3_commonscore)
print(user4_commonscore)
print(user5_commonscore)
return render(request,'MovieResult.html',{
'movies':r_movies[0:30],
'user1_commonscore':user1_commonscore,
'user2_commonscore':user2_commonscore,
'user3_commonscore':user3_commonscore,
'user4_commonscore':user4_commonscore,
'user5_commonscore':user5_commonscore,
'user_list':r_users_list,
            'user1id':r_users_list[0],  # also passed individually for the template
'user2id':r_users_list[1],
'user3id':r_users_list[2],
'user4id':r_users_list[3],
'user5id':r_users_list[4],
})
return render(request,'MovieRecommend_1.html') | UTF-8 | Python | false | false | 19,255 | py | 10 | views.py | 2 | 0.584648 | 0.553857 | 0 | 505 | 35.788119 | 168 |
gcallah/colossus | 11,218,454,597,134 | ac0160f5fe08edcc4d642c4b82efaecb2f13089e | 30a47b2cf4213359e761cd339f4976d2913e1ec0 | /colossus/apps/lists/views.py | 81811a1fa935fa980a0b9eb194f2775531a63a33 | [
"MIT"
]
| permissive | https://github.com/gcallah/colossus | 11aac2f23709c34ae4f0ff67545817338ca5f4c5 | ee5319091cd19c96987825258a57e6d6f9d8fc51 | refs/heads/master | 2020-08-19T01:17:02.102778 | 2020-05-12T02:31:31 | 2020-05-12T02:31:31 | 215,858,204 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
from typing import Any, Dict
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import Count, Q
from django.forms import modelform_factory
from django.http import Http404, HttpRequest, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import gettext, gettext_lazy as _
from django.views.generic import (
CreateView, DeleteView, DetailView, FormView, ListView, TemplateView,
UpdateView, View,
)
from django.views.decorators.csrf import csrf_exempt
from colossus.apps.core.models import Country
from colossus.apps.subscribers.constants import (
ActivityTypes, Status, TemplateKeys, Workflows,
)
from colossus.apps.subscribers.models import (
Activity, Subscriber, SubscriptionFormTemplate, Tag,
)
from colossus.apps.subscribers.subscription_settings import (
SUBSCRIPTION_FORM_TEMPLATE_SETTINGS,
)
from colossus.utils import get_absolute_url, is_uuid
from .charts import (
ListDomainsChart, ListLocationsChart, SubscriptionsSummaryChart,
)
from .forms import (
BulkTagForm, ConfirmSubscriberImportForm, MailingListSMTPForm,
PasteImportSubscribersForm,
)
from .mixins import FormTemplateMixin, MailingListMixin
from .models import MailingList, SubscriberImport
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class MailingListListView(ListView):
model = MailingList
context_object_name = 'mailing_lists'
ordering = ('name',)
paginate_by = 25
def get_context_data(self, **kwargs):
kwargs['menu'] = 'lists'
kwargs['total_count'] = MailingList.objects.count()
return super().get_context_data(**kwargs)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class MailingListCreateView(CreateView):
model = MailingList
fields = ('name', 'slug', 'campaign_default_from_name', 'campaign_default_from_email', 'website_url')
def get_context_data(self, **kwargs):
kwargs['menu'] = 'lists'
return super().get_context_data(**kwargs)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class MailingListDetailView(DetailView):
model = MailingList
context_object_name = 'mailing_list'
def get_context_data(self, **kwargs) -> Dict:
locations = self.object.get_active_subscribers() \
.select_related('location') \
.values('location__country__code', 'location__country__name') \
.annotate(total=Count('location__country__code')) \
.order_by('-total')[:10]
last_campaign = self.object.campaigns.order_by('-send_date').first()
domains = self.object.get_active_subscribers() \
.select_related('domain') \
.values('domain__name') \
.annotate(total=Count('domain__name')) \
.order_by('-total')[:10]
thirty_days_ago = timezone.now() - datetime.timedelta(30)
subscribed_expression = Count('id', filter=Q(activity_type=ActivityTypes.SUBSCRIBED))
unsubscribed_expression = Count('id', filter=Q(activity_type=ActivityTypes.UNSUBSCRIBED))
cleaned_expression = Count('id', filter=Q(activity_type=ActivityTypes.CLEANED))
summary_last_30_days = Activity.objects \
.filter(subscriber__mailing_list=self.object, date__gte=thirty_days_ago) \
.aggregate(subscribed=subscribed_expression,
unsubscribed=unsubscribed_expression,
cleaned=cleaned_expression)
kwargs['menu'] = 'lists'
kwargs['submenu'] = 'details'
kwargs['subscribed_count'] = self.object.subscribers.filter(status=Status.SUBSCRIBED).count()
kwargs['unsubscribed_count'] = self.object.subscribers.filter(status=Status.UNSUBSCRIBED).count()
kwargs['cleaned_count'] = self.object.subscribers.filter(status=Status.CLEANED).count()
kwargs['locations'] = locations
kwargs['last_campaign'] = last_campaign
kwargs['summary_last_30_days'] = summary_last_30_days
kwargs['domains'] = domains
return super().get_context_data(**kwargs)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class MailingListCountryReportView(MailingListMixin, DetailView):
model = MailingList
context_object_name = 'mailing_list'
template_name = 'lists/country_report.html'
def get_context_data(self, **kwargs):
country_code = self.kwargs.get('country_code')
country = get_object_or_404(Country, code=country_code)
country_total_subscribers = self.object.get_active_subscribers() \
.filter(location__country__code=country_code) \
.values('location__country__code') \
.aggregate(total=Count('location__country__code'))
cities = self.object.get_active_subscribers() \
.filter(location__country__code=country_code) \
.select_related('location') \
.values('location__name') \
.annotate(total=Count('location__name')) \
.order_by('-total')[:100]
kwargs['menu'] = 'lists'
kwargs['country'] = country
kwargs['country_total_subscribers'] = country_total_subscribers['total']
kwargs['cities'] = cities
return super().get_context_data(**kwargs)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberListView(MailingListMixin, ListView):
model = Subscriber
context_object_name = 'subscribers'
paginate_by = 100
template_name = 'lists/subscriber_list.html'
def get_context_data(self, **kwargs):
kwargs['submenu'] = 'subscribers'
kwargs['total_count'] = self.model.objects.count()
return super().get_context_data(**kwargs)
def get_queryset(self):
queryset = self.model.objects.filter(mailing_list_id=self.kwargs.get('pk'))
tags_filter = self.request.GET.getlist('tags__in')
if tags_filter:
queryset = queryset.filter(tags__in=tags_filter)
if self.request.GET.get('q', ''):
query = self.request.GET.get('q').strip()
if is_uuid(query):
queryset = queryset.filter(uuid=query)
else:
queryset = queryset.filter(Q(email__icontains=query) | Q(name__icontains=query))
self.extra_context = {
'is_filtered': True,
'query': query
}
return queryset.order_by('optin_date')
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberCreateView(MailingListMixin, CreateView):
model = Subscriber
fields = ('email', 'name')
template_name = 'lists/subscriber_form.html'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.mailing_list_id = self.kwargs.get('pk')
self.object.status = Status.SUBSCRIBED
self.object.save()
return redirect('lists:subscribers', pk=self.kwargs.get('pk'))
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberDetailView(MailingListMixin, DetailView):
model = Subscriber
pk_url_kwarg = 'subscriber_pk'
template_name = 'lists/subscriber_detail.html'
context_object_name = 'subscriber'
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberUpdateView(MailingListMixin, UpdateView):
model = Subscriber
fields = '__all__'
pk_url_kwarg = 'subscriber_pk'
template_name = 'lists/subscriber_form.html'
def get_success_url(self):
return reverse('lists:subscribers', kwargs={'pk': self.kwargs.get('pk')})
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberDeleteView(MailingListMixin, DeleteView):
model = Subscriber
pk_url_kwarg = 'subscriber_pk'
context_object_name = 'subscriber'
template_name = 'lists/subscriber_confirm_delete.html'
def get_success_url(self):
return reverse('lists:subscribers', kwargs={'pk': self.kwargs.get('pk')})
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class ImportSubscribersView(MailingListMixin, TemplateView):
template_name = 'lists/import_subscribers.html'
def get_context_data(self, **kwargs):
kwargs['submenu'] = 'subscribers'
return super().get_context_data(**kwargs)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class PasteEmailsImportSubscribersView(MailingListMixin, FormView):
template_name = 'lists/import_subscribers_form.html'
form_class = PasteImportSubscribersForm
extra_context = {'title': _('Paste Emails')}
def form_valid(self, form):
try:
mailing_list_id = self.kwargs.get('pk')
mailing_list = MailingList.objects.only('pk').get(pk=mailing_list_id)
form.import_subscribers(mailing_list)
return redirect('lists:subscribers', pk=mailing_list_id)
except MailingList.DoesNotExist:
raise Http404
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriptionFormsView(MailingListMixin, TemplateView):
template_name = 'lists/subscription_forms.html'
def get_context_data(self, **kwargs):
kwargs['submenu'] = 'forms'
kwargs['sub'] = get_absolute_url('subscribers:subscribe', {'mailing_list_uuid': self.mailing_list.uuid})
kwargs['sub_short'] = get_absolute_url('subscribe_shortcut', {'mailing_list_slug': self.mailing_list.slug})
kwargs['unsub'] = get_absolute_url('subscribers:unsubscribe_manual', {
'mailing_list_uuid': self.mailing_list.uuid
})
kwargs['unsub_short'] = get_absolute_url('unsubscribe_shortcut', {'mailing_list_slug': self.mailing_list.slug})
return super().get_context_data(**kwargs)
class TagMixin:
model = Tag
extra_context = {'submenu': 'tags'}
pk_url_kwarg = 'tag_pk'
def get_queryset(self):
return super().get_queryset().filter(mailing_list_id=self.kwargs.get('pk'))
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class TagListView(TagMixin, MailingListMixin, ListView):
context_object_name = 'tags'
paginate_by = 100
template_name = 'lists/tag_list.html'
def get_queryset(self):
queryset = super().get_queryset()
if self.request.GET.get('q', ''):
query = self.request.GET.get('q').strip()
queryset = queryset.filter(Q(name__icontains=query) | Q(description__icontains=query))
self.extra_context = {
'is_filtered': True,
'query': query
}
queryset = queryset.annotate(subscribers_count=Count('subscribers'))
return queryset.order_by('name')
class BulkTagSubscribersView(LoginRequiredMixin, TagMixin, MailingListMixin, FormView):
form_class = BulkTagForm
context_object_name = 'tag'
template_name = 'lists/bulk_tag_form.html'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['mailing_list'] = self.mailing_list
return kwargs
def form_valid(self, form):
form.tag_subscribers()
return redirect('lists:tags', pk=self.mailing_list.pk)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class TagCreateView(TagMixin, MailingListMixin, CreateView):
fields = ('name', 'description')
context_object_name = 'tag'
template_name = 'lists/tag_form.html'
def form_valid(self, form):
tag = form.save(commit=False)
tag.mailing_list_id = self.kwargs.get('pk')
tag.save()
messages.success(self.request, _('Tag "%(name)s" created with success.') % form.cleaned_data)
return redirect('lists:tags', pk=self.kwargs.get('pk'))
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class TagUpdateView(SuccessMessageMixin, TagMixin, MailingListMixin, UpdateView):
fields = ('name', 'description')
context_object_name = 'tag'
template_name = 'lists/tag_form.html'
success_message = _('Tag "%(name)s" updated with success.')
def get_success_url(self):
return reverse('lists:tags', kwargs={'pk': self.kwargs.get('pk')})
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class TagDeleteView(TagMixin, MailingListMixin, DeleteView):
context_object_name = 'tag'
template_name = 'lists/tag_confirm_delete.html'
def get_success_url(self):
return reverse('lists:tags', kwargs={'pk': self.kwargs.get('pk')})
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class AbstractSettingsView(UpdateView):
model = MailingList
context_object_name = 'mailing_list'
template_name = 'lists/settings.html'
def get_context_data(self, **kwargs):
kwargs['menu'] = 'lists'
kwargs['submenu'] = 'settings'
kwargs['subsubmenu'] = self.subsubmenu
kwargs['title'] = self.title
return super().get_context_data(**kwargs)
def get_success_url(self):
return reverse(self.success_url_name, kwargs={'pk': self.kwargs.get('pk')})
class ListSettingsView(AbstractSettingsView):
fields = ('name', 'slug', 'website_url', 'contact_email_address',)
success_url_name = 'lists:settings'
subsubmenu = 'list_settings'
title = _('Settings')
class SubscriptionSettingsView(AbstractSettingsView):
fields = ('list_manager', 'enable_recaptcha', 'recaptcha_site_key', 'recaptcha_secret_key')
success_url_name = 'lists:subscription_settings'
subsubmenu = 'subscription_settings'
title = _('Subscription settings')
class CampaignDefaultsView(AbstractSettingsView):
fields = ('campaign_default_from_name', 'campaign_default_from_email', 'campaign_default_email_subject',)
success_url_name = 'lists:defaults'
subsubmenu = 'defaults'
title = _('Campaign defaults')
class SMTPCredentialsView(AbstractSettingsView):
form_class = MailingListSMTPForm
success_url_name = 'lists:smtp'
subsubmenu = 'smtp'
title = _('SMTP credentials')
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class FormsEditorView(MailingListMixin, TemplateView):
template_name = 'lists/forms_editor.html'
def get_context_data(self, **kwargs):
kwargs['template_keys'] = TemplateKeys
kwargs['workflows'] = Workflows
kwargs['subscription_forms'] = SUBSCRIPTION_FORM_TEMPLATE_SETTINGS
return super().get_context_data(**kwargs)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriptionFormTemplateUpdateView(FormTemplateMixin, MailingListMixin, UpdateView):
model = SubscriptionFormTemplate
context_object_name = 'form_template'
template_name = 'lists/form_template_form.html'
def get_success_url(self):
return reverse('lists:edit_form_template', kwargs=self.kwargs)
def get_context_data(self, **kwargs):
kwargs['template_keys'] = TemplateKeys
kwargs['workflows'] = Workflows
kwargs['subscription_forms'] = SUBSCRIPTION_FORM_TEMPLATE_SETTINGS
return super().get_context_data(**kwargs)
def get_form_class(self):
fields = self.object.settings['fields']
form_class = modelform_factory(self.model, fields=fields)
return form_class
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class ResetFormTemplateView(FormTemplateMixin, MailingListMixin, View):
def post(self, request: HttpRequest, pk: int, form_key: str):
form_template = self.get_object()
form_template.load_defaults()
messages.success(request, gettext('Default template restored with success!'))
return redirect('lists:edit_form_template', pk=pk, form_key=form_key)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class PreviewFormTemplateView(FormTemplateMixin, MailingListMixin, View):
def post(self, request, pk, form_key):
self.form_template = self.get_object()
content = request.POST.get('content_html')
html = self.form_template.render_template({'content': content, 'preview': True})
return HttpResponse(html)
def get(self, request, pk, form_key):
self.form_template = self.get_object()
html = self.form_template.render_template({'preview': True})
return HttpResponse(html)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class CustomizeDesignView(UpdateView):
model = MailingList
fields = ('forms_custom_css', 'forms_custom_header')
context_object_name = 'mailing_list'
template_name = 'lists/customize_design.html'
def get_context_data(self, **kwargs):
kwargs['menu'] = 'lists'
return super().get_context_data(**kwargs)
def get_success_url(self):
return reverse('lists:forms_editor', kwargs={'pk': self.kwargs.get('pk')})
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberImportView(MailingListMixin, CreateView):
model = SubscriberImport
fields = ('file',)
template_name = 'lists/import_subscribers_form.html'
extra_context = {'title': _('Import CSV File')}
def get_context_data(self, **kwargs):
kwargs['subscriber_imports'] = SubscriberImport.objects.order_by('-upload_date')
return super().get_context_data(**kwargs)
def form_valid(self, form):
mailing_list_id = self.kwargs.get('pk')
subscriber_import = form.save(commit=False)
subscriber_import.user = self.request.user
subscriber_import.mailing_list_id = mailing_list_id
subscriber_import.save()
subscriber_import.set_size()
return redirect('lists:import_preview', pk=mailing_list_id, import_pk=subscriber_import.pk)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberImportPreviewView(MailingListMixin, UpdateView):
model = SubscriberImport
form_class = ConfirmSubscriberImportForm
template_name = 'lists/import_preview.html'
pk_url_kwarg = 'import_pk'
context_object_name = 'subscriber_import'
def get_success_url(self):
submit = self.request.POST.get('submit', 'save')
if submit == 'import':
return reverse('lists:import_queued', kwargs=self.kwargs)
return reverse('lists:csv_import_subscribers', kwargs={'pk': self.kwargs.get('pk')})
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberImportQueuedView(MailingListMixin, DetailView):
model = SubscriberImport
template_name = 'lists/import_queued.html'
pk_url_kwarg = 'import_pk'
context_object_name = 'subscriber_import'
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriberImportDeleteView(MailingListMixin, DeleteView):
model = SubscriberImport
pk_url_kwarg = 'import_pk'
context_object_name = 'subscriber_import'
template_name = 'lists/subscriber_import_confirm_delete.html'
def get_success_url(self):
return reverse('lists:csv_import_subscribers', kwargs={'pk': self.kwargs.get('pk')})
class ChartView(View):
chart_class: Any
def get(self, request, pk):
try:
mailing_list = MailingList.objects.get(pk=pk)
chart = self.chart_class(mailing_list)
return JsonResponse({'chart': chart.get_settings()})
except MailingList.DoesNotExist:
# bad request status code
            return JsonResponse(data={'message': gettext('Invalid mailing list id.')}, status=400)
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class SubscriptionsSummaryChartView(ChartView):
chart_class = SubscriptionsSummaryChart
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class ListDomainsChartView(ChartView):
chart_class = ListDomainsChart
@method_decorator(login_required, name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class ListLocationsChartView(ChartView):
chart_class = ListLocationsChart
@login_required
@csrf_exempt
def download_subscriber_import(request, pk, import_pk):
subscriber_import = get_object_or_404(SubscriberImport, pk=import_pk, mailing_list_id=pk)
filename = subscriber_import.file.name.split('/')[-1]
response = HttpResponse(subscriber_import.file.read(), content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
return response
| UTF-8 | Python | false | false | 21,683 | py | 43 | views.py | 28 | 0.686067 | 0.68413 | 0 | 568 | 37.174296 | 119 |
promisivia/experience-oim-under-dcm | 11,476,152,617,226 | 6266695cc02c964c6d3129acf9eaeb8057b54d9b | 853882976cffe5cd615a899dd9b438a2b78f3c34 | /BanditAlg/UCB.py | 25a0a7777c5d735cb4cb1eb55a1798c86985902d | []
| no_license | https://github.com/promisivia/experience-oim-under-dcm | 9814d026e15685b7a00085777d2ae9aa430400e1 | 0bac6aef6cbc9b4af152d228b9f2c172c8ce49d1 | refs/heads/main | 2023-02-19T16:21:13.680028 | 2021-01-22T17:56:16 | 2021-01-22T17:56:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import log, inf, sqrt
from random import choice
import itertools
class UCBStruct(object):
def __init__(self, S):
self.S = S
self.totalReward = 0.0
self.numPlayed = 0
self.averageReward = 0.0
self.upperBound = inf
self.p_max = 1
def updateParameters(self, reward, delta):
self.totalReward += reward
self.numPlayed += 1
self.averageReward = self.totalReward / float(self.numPlayed)
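        # UCB index: empirical mean plus an exploration bonus
        # sqrt(2 * ln(1/delta) / n) that shrinks as the arm is played more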
self.upperBound = self.averageReward + sqrt(2 * log(1 / delta) / float(self.numPlayed))
print('arm:', self.S, 'with upper bound:', self.upperBound)
def getUpperBound(self):
return self.upperBound
class UCBAlgorithm:
def __init__(self, G, P, seed_size, feedback='edge', delta=1/2000):
self.G = G
self.length = float(len(G.nodes))
self.trueP = P
self.seed_size = seed_size
self.feedback = feedback
self.arms = {}
for nodes in itertools.combinations(list(self.G.nodes()), seed_size):
self.arms[nodes] = UCBStruct(nodes)
self.TotalPlayCounter = 0
self.delta = delta
def decide(self):
self.TotalPlayCounter += 1
max_upper_bound = 0
opt_set = []
for key, value in self.arms.items():
current_upper_bound = value.getUpperBound()
if current_upper_bound > max_upper_bound:
opt_set.clear()
opt_set.append(key)
max_upper_bound = current_upper_bound
elif current_upper_bound == max_upper_bound:
opt_set.append(key)
return list(choice(opt_set))
def updateParameters(self, S, live_nodes, live_edges, iter_):
reward = len(live_nodes)
self.arms[tuple(S)].updateParameters(reward=reward, delta=self.delta)
| UTF-8 | Python | false | false | 1,846 | py | 36 | UCB.py | 35 | 0.593174 | 0.583965 | 0 | 55 | 32.563636 | 95 |
umentu/django_tutorial | 4,801,773,480,697 | dea609b7dd87718eb00ff19eb9a9407740fe642b | 7ad9a64bebca0cc20d6bd0e466635d72be367d55 | /polls/migrations/0006_auto_20180523_0950.py | 621efdee22609d30f2ca282dfb937ff7f8fc75ed | []
| no_license | https://github.com/umentu/django_tutorial | f2655a95f59989eebd70e8d3f4f1dfd1d88d02ba | 75aaa2c7bcb74c63c8f781051240aab53d62b1d7 | refs/heads/master | 2020-03-18T11:07:37.075205 | 2018-05-24T02:49:51 | 2018-05-24T02:49:51 | 134,652,005 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.4 on 2018-05-23 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0005_auto_20180523_0926'),
]
operations = [
migrations.AlterField(
model_name='image',
name='image_url',
field=models.ImageField(upload_to='polls/'),
),
]
| UTF-8 | Python | false | false | 392 | py | 9 | 0006_auto_20180523_0950.py | 6 | 0.581633 | 0.502551 | 0 | 18 | 20.777778 | 56 |
gpalazzo/undergrad_exercises | 6,880,537,631,716 | ed889da827fb6b9800afbb1aeac1ca8b301486a2 | d99b2f5ebc15e089197ce1fc181be9569a57128d | /iot_device/src/ubidots_platform.py | f6ee48f0b855c5bfb63d2c732d1cf853d3ec87a0 | []
| no_license | https://github.com/gpalazzo/undergrad_exercises | 12b1229a99312ac10d50e87affcf3fbf9b36dcaa | 367f402303c6add90b755b47a239a71658662de2 | refs/heads/master | 2022-03-21T07:52:39.435446 | 2021-08-02T19:51:06 | 2021-08-02T19:51:06 | 213,013,132 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from time import sleep
TOKEN = "my_token"
DEVICE_LABEL = "DrAssist"
VARIABLE_LABEL_1 = "temperatura"
VARIABLE_LABEL_2 = "temperatura-media"
VARIABLE_LABEL_3 = "bpm"
VARIABLE_LABEL_4 = "spo2"
VARIABLE_LABEL_5 = "status-paciente"
def build_payload(temp_atual, temp_media, bpm, spo2, status_paciente):
# Creates two random values for sending data
payload = {
VARIABLE_LABEL_1: temp_atual,
VARIABLE_LABEL_2: temp_media,
VARIABLE_LABEL_3: bpm,
VARIABLE_LABEL_4: spo2,
VARIABLE_LABEL_5: status_paciente}
return payload
def post_request(payload):
# Creates the headers for the HTTP requests
url = "http://industrial.api.ubidots.com/api/v1.6/"
url = "{}devices/{}".format(url, DEVICE_LABEL)
headers = {"X-Auth-Token": TOKEN, "Content-Type": "application/json"}
# Makes the HTTP requests
status = 400
attempts = 0
    while status >= 400 and attempts < 5:
req = requests.post(url=url, headers=headers, json=payload)
status = req.status_code
attempts += 1
sleep(1)
# Processes results
if status >= 400:
print("[ERROR] Could not send data after 5 attempts, please check\
your token credentials and internet connection")
return False
print("[INFO] request made properly, your device is updated")
return True
| UTF-8 | Python | false | false | 1,377 | py | 73 | ubidots_platform.py | 8 | 0.655047 | 0.633987 | 0 | 46 | 28.934783 | 74 |
alexasp/botty-plugins | 7,198,365,220,249 | 19bc3a84e115a5e918d6c0b96c594166125fc3e0 | ef1915d94587b2437c7192616f9d6cd4ad501a6d | /whatis/__init__.py | 0ef1b06b40f961d96dd20558a2e656251e10159e | []
| no_license | https://github.com/alexasp/botty-plugins | f9ed2133ca717284f527c1abf98f1aafe0c14194 | 293e0b7c77d1a1a3b06160f27eff6d1da935189c | refs/heads/master | 2022-09-02T09:35:00.564188 | 2022-08-18T13:31:12 | 2022-08-18T13:31:12 | 81,880,237 | 0 | 2 | null | false | 2017-02-24T10:03:17 | 2017-02-13T22:50:46 | 2017-02-13T22:52:57 | 2017-02-24T10:03:17 | 30 | 0 | 2 | 0 | Python | null | null | import asyncio, aiohttp, re, os
import plugins
import logging
import random
import io
logger = logging.getLogger(__name__)
def _initialise(bot):
plugins.register_user_command(["whatis"])
def whatis(bot, event, *args):
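    # Query the Wikipedia API for the search term and reply with the first sentence of the article.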
if event.user.is_self:
return
search_term = "+".join(args).lower()
url = 'https://en.wikipedia.org/w/api.php?action=query&format=json&prop=extracts&titles='+search_term+'&formatversion=2&exsentences=1&explaintext=1&exsectionformat=plain'
r = yield from aiohttp.request('get', url)
r_json = yield from r.json()
logger.info(url)
if len(r_json['query']) == 0:
        yield from bot.coro_send_message(event.conv_id, 'Sorry, ' + event.user.full_name + ', I dunno what ' + search_term + ' is ¯\\\_(ツ)_/¯')
return
page = r_json['query']['pages'][random.randint(0,len(r_json['query']['pages'])-1)]
if "missing" in page:
        yield from bot.coro_send_message(event.conv_id, 'Sorry, ' + event.user.full_name + ', I dunno what ' + search_term + ' is ¯\\\_(ツ)_/¯')
return
yield from bot.coro_send_message(event.conv_id, page['extract'])
| UTF-8 | Python | false | false | 1,151 | py | 26 | __init__.py | 25 | 0.627297 | 0.622047 | 0 | 32 | 33.71875 | 174 |
phi99/Vulnerability-Scanner | 944,892,824,558 | 979aae67731919a4930394e5c3e7d198d94011be | f3c89a4bb755c57f36b274c91d9553de6dc6e1eb | /scanner.py | 489f47f585ae81143345e7f5010d811725dae131 | []
| no_license | https://github.com/phi99/Vulnerability-Scanner | 18672d34c2bcadbe057f178d6e36ab9fe4b16502 | dccf9a7f91dd7840bb90ff7c76ab78f8fa341c61 | refs/heads/master | 2020-06-20T10:27:51.313864 | 2019-07-16T01:01:15 | 2019-07-16T01:01:15 | 197,094,710 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket, subprocess,sys
from datetime import datetime
subprocess.call('clear',shell=True)
rmip = raw_input("\t Enter the host IP to scan: ")
r1 = int(raw_input("\t Enter the start port number\t"))
r2 = int(raw_input("\t Enter the last port number\t"))
print "*"*40
print "\n Scanner is working on ", rmip
print "*"*40
t1 = datetime.now()
try:
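    # Attempt a TCP connection to every port in the range; connect_ex returns 0 when the port is open.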
    for port in range(r1, r2 + 1):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        socket.setdefaulttimeout(1)
        result = sock.connect_ex((rmip, port))
        if result == 0:
            print "Port Open:-->\t", port
# print desc[port]
sock.close()
except KeyboardInterrupt:
print "You stop this "
sys.exit()
except Exception as e :
print e
sys.exit()
t2 = datetime.now()
total = t2 - t1
print "Scanning completed in", total
| UTF-8 | Python | false | false | 785 | py | 1 | scanner.py | 1 | 0.680255 | 0.66242 | 0 | 35 | 21.428571 | 58 |
taduyhc/Code.ptit.TaDuy | 2,319,282,386,785 | ad3e2cd565c200bb48cf6f5c0db61caa5aef5dde | 28a28b7ffb2c5a576b24dffde092665d3f76a2ba | /Python/PY02007.py | 705bdb60b9527b178cdcb21a29f99cc429941e5c | []
| no_license | https://github.com/taduyhc/Code.ptit.TaDuy | 1a8992b2e32091bdd5453f71f9389f1d37402e07 | 1d27371a87c9669bb7d6db6877ac6f4881eeaa9c | refs/heads/main | 2023-08-19T10:30:27.253617 | 2023-03-18T14:54:56 | 2023-03-18T14:54:56 | 315,073,887 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | test = 10
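# Read integers (possibly several per line) until 10 have been consumed, then print the count of distinct residues mod 42.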
s = set()
while test > 0:
data = input()
base = data.split()
    for x in base:
        s.add(int(x) % 42)
test -= len(base)
print(len(s))
| UTF-8 | Python | false | false | 190 | py | 587 | PY02007.py | 582 | 0.5 | 0.468421 | 0 | 9 | 19.111111 | 32 |
MrEnvision/pac_learn_DOTAs | 13,340,168,449,252 | 9c6369176270fba2170b88819a0080e584583923 | a9550599683236aad300c93d4c9f1b8828d0b65b | /main.py | da67721efae6f13af57ee511acf88c28de9b59ed | [
"MIT"
]
| permissive | https://github.com/MrEnvision/pac_learn_DOTAs | 7397639e64855ababce6a857f5ddb2ee95b6e9e6 | b6787b5f1c137763f226e4af3b06b4e071df9070 | refs/heads/master | 2023-02-09T00:38:26.100656 | 2022-05-03T12:26:49 | 2022-05-03T12:26:49 | 297,907,611 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # main file
import sys
import random
import json
import time
from common.system import build_system
from common.validate import validate
from common.make_pic import make_system, make_hypothesis
from normal_learning.learnOTA import learnOTA_normal
# from normal_learning.debug.learnOTA import learnOTA_normal
# from normal_learning.learnOTA_level import learnOTA_normal_level
# from normal_learning.debug.learnOTA_level import learnOTA_normal_level
# from normal_learning.learnOTA_pruning import learnOTA_normal_pruning
from smart_learning.learnOTA import learnOTA_smart
def main():
# get model information and build target system
with open(model_file, 'r') as json_model:
model = json.load(json_model)
system = build_system(model)
make_system(system, result_path, '/model_target')
# get prior information required for learning
with open(precondition_file, 'r') as json_precondition:
information = json.load(json_precondition)
actions = information["inputs"]
upper_guard = information["upperGuard"] # upper bound of guard time
epsilon = information["epsilon"] # accuracy
delta = information["delta"] # confidence
state_num = information["stateNum"] # not necessary
# pac learning of DOTAs
start_time = time.time()
print("********** learning starting *************")
if teacher_type == "smart_teacher":
comparator_flag = True
learned_system, mq_num, eq_num, test_num, test_num_cache, action_num, table_num = learnOTA_smart(system, actions, upper_guard, epsilon, delta, state_num, comparator_flag, debug_flag)
elif teacher_type == "normal_teacher":
learned_system, mq_num, eq_num, test_num, test_num_cache, action_num, table_num = learnOTA_normal(system, actions, upper_guard, epsilon, delta, state_num, debug_flag)
# learned_system, mq_num, eq_num, test_num, test_num_cache, action_num, table_num, level, hpy_num = learnOTA_normal_level(system, actions, upper_guard, epsilon, delta, state_num, debug_flag)
# learned_system, mq_num, eq_num, test_num, test_num_cache, action_num, table_num = learnOTA_normal_pruning(system, actions, upper_guard, epsilon, delta, state_num, debug_flag)
else:
raise Exception('Teacher type only allows two options: smart_teacher and normal_teacher.')
end_time = time.time()
# learning result
if learned_system is None:
print("Error! Learning Failed.")
print("*********** learning ending *************")
return {"result": "Failed"}
else:
# validate
correct_flag, passing_rate = validate(learned_system, system, upper_guard)
make_hypothesis(learned_system, result_path, '/model_hypothesis')
print("———————————————————————————————————————————")
print("Succeed! The result is as follows:")
print("Total time of learning: " + str(end_time - start_time))
print("Total number of MQs (no-cache): " + str(mq_num))
print("Total number of EQs (no-cache): " + str(eq_num))
print("Total number of tests (no-cache): " + str(test_num))
print("Total number of tests (with-cache): " + str(test_num_cache))
print("Total number of actions: " + str(action_num))
print("Total number of tables explored: " + str(table_num))
print("Completely correct: " + str(correct_flag) + " Testing pass rate: " + str(passing_rate))
# print("level:" + str(level) + " hypothesis number of current level: " + str(hpy_num))
print("*********** learning ending *************")
trans = []
for t in learned_system.trans:
trans.append([str(t.tran_id), str(t.source), str(t.action), t.show_guards(), str(t.reset), str(t.target)])
result_obj = {
"result": "Success",
"learningTime": end_time - start_time,
"mqNum": mq_num,
"eqNum": eq_num,
"testNum": test_num,
"testNumCache": test_num_cache,
"actionNum": action_num,
"tableNum": table_num,
"correct": correct_flag,
"passingRate": passing_rate,
# "level": level,
# "hpyNum_of_level": hpy_num,
"model": {
"actions": learned_system.actions,
"states": learned_system.states,
"initState": learned_system.init_state,
"acceptStates": learned_system.accept_states,
"sinkState": learned_system.sink_state,
"trans": trans
}
}
return result_obj
if __name__ == '__main__':
### used to reproduce experimental results
random.seed(3)
### file directory
# file_path = sys.argv[1]
file_path = "benchmarks/4_2_10/4_2_10-10"
# target model file
model_file = file_path + "/model.json"
# prior information required for learning
precondition_file = file_path + "/precondition.json"
### teacher type - smart_teacher / normal_teacher
# teacher_type = sys.argv[2]
teacher_type = "smart_teacher"
# results file directory
result_path = 'results/' + teacher_type + '/' + file_path
# debug mode
debug_flag = True
### start running experiment
result = main()
with open(result_path + "/result.json", 'w') as json_file:
json_file.write(json.dumps(result, indent=2))
| UTF-8 | Python | false | false | 5,485 | py | 26 | main.py | 22 | 0.619744 | 0.617151 | 0 | 120 | 43.991667 | 198 |
takaya-0808/atcoder | 7,825,430,451,567 | 62ce3f7c2d7188b5bfab70805bb688b719ebf300 | 9a8cffe460d906aa9252c2894d22c8e06bf646db | /abc150/c.py | 967fd4f09aca6418dd60efb26f5541a0cefe9650 | []
| no_license | https://github.com/takaya-0808/atcoder | e955f7439e3ed573986505a45f717587ff073d55 | 6b302e73c1aa06cdc8220e3c0f43c7a6a7bccaa5 | refs/heads/master | 2021-04-08T07:28:50.904649 | 2020-08-16T05:04:14 | 2020-08-16T05:04:14 | 248,753,422 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from itertools import permutations
def main():
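    # Find the 1-based positions of P and Q among the lexicographic permutations of 1..N and print their distance.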
p_score, q_score, count = 0,0,0
string = ""
num = int(input())
p_num = tuple(list(map(str, input().split())))
q_num = tuple(list(map(str, input().split())))
for i in range(1,num+1):
string = string + str(i)
list_ = list(permutations(string, num))
for i in list_:
count += 1
        if p_num == i:
            p_score = count
        if q_num == i:
            q_score = count
print(abs(q_score - p_score))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 497 | py | 40 | c.py | 40 | 0.579477 | 0.567404 | 0 | 28 | 16.75 | 47 |
vincentsarago/wagyu | 18,502,719,125,855 | 4792377160a3a613d9d5aa1cd79bdc22c7d6fad9 | cd3df53a432d35e2fe7b4e4f9bbe62222235a85b | /wagyu/hints.py | 9fc11570b86cdd943e2be427c8605285c5717447 | [
"MIT"
]
| permissive | https://github.com/vincentsarago/wagyu | 00ccbe6c9d101724483bde00e10ef512d2c95f9a | f6dce8d119fafa190d07f042ff6c4d5729a4c1e6 | refs/heads/master | 2023-01-20T06:26:27.475502 | 2020-11-21T04:57:01 | 2020-11-21T04:57:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from numbers import Real
from typing import TypeVar
Coordinate = Real
Domain = TypeVar('Domain')
| UTF-8 | Python | false | false | 98 | py | 191 | hints.py | 185 | 0.785714 | 0.785714 | 0 | 5 | 18.6 | 26 |
orrak/Advent-Of-Code-2019 | 2,800,318,706,521 | 9d2744366b925334dbab29f237e2075849db98e2 | a0f4c4a4da54e0b8467254c40255e4e495e8e653 | /day2.py | 31676b745fa9bff1da790cec65ed13a0948b17c1 | []
| no_license | https://github.com/orrak/Advent-Of-Code-2019 | b8fd179227c9dd70abb40b3760cdc480151b7b11 | 8d43b49e17f0b18b4ef27a79f2f6d52ee8ab31be | refs/heads/master | 2021-07-14T02:16:11.227468 | 2020-12-07T01:50:05 | 2020-12-07T01:50:05 | 225,109,114 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def func(lst):
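    # Intcode interpreter: opcode 1 adds, opcode 2 multiplies, opcode 99 halts.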
i = 0
while i < len(lst):
if lst[i] == 1:
lst[lst[i+3]] = lst[lst[i+1]] + lst[lst[i+2]]
i += 4
elif lst[i] == 2:
lst[lst[i+3]] = lst[lst[i+1]] * lst[lst[i+2]]
i += 4
elif lst[i] == 99:
i = len(lst)+1
else:
i = len(lst)+1
return lst
# Part 1
f = open('input2.txt', 'r').readlines()
lst = f[0].split(',')
lst[1] = 12
lst[2] = 2
for i in range(len(lst)):
lst[i] = int(lst[i])
lst = func(lst)
print(lst[0])
# Part 2
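# Brute-force every (noun, verb) pair in 0..99 until the program leaves 19690720 at position 0.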
for noun in range(100):
for verb in range(100):
f = open('input2.txt', 'r').readlines()
lst = f[0].split(',')
lst[1] = noun
lst[2] = verb
for i in range(len(lst)):
lst[i] = int(lst[i])
lst = func(lst)
if lst[0] == 19690720:
print(100*noun + verb)
| UTF-8 | Python | false | false | 889 | py | 12 | day2.py | 12 | 0.433071 | 0.380202 | 0 | 46 | 18.326087 | 57 |
ankitr/N-lang | 3,435,973,883,834 | ddbd38313f648e859cbe9ffc4aac80e4b8748d6e | 6b4db50b0a7cfbc0f1520a6dbee61489d21d2096 | /python/FileIO.py | 880b9d51a20a32829002175626976807d8c97970 | [
"MIT"
]
| permissive | https://github.com/ankitr/N-lang | a3fc66296716baa22154173da38e9755cee9f32e | ac6d20a5b688d5281a86dfdaba35938eaa402500 | refs/heads/main | 2023-02-07T07:06:58.237314 | 2020-12-25T22:16:30 | 2020-12-25T22:16:30 | 324,441,431 | 0 | 0 | MIT | true | 2020-12-25T22:16:54 | 2020-12-25T22:16:54 | 2020-12-25T08:55:31 | 2020-12-25T01:20:26 | 6,198 | 0 | 0 | 0 | null | false | false | def write(args):
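    # Arguments appear to arrive as quoted N-lang strings; [1:-1] strips the surrounding quotes.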
with open(str(args[0])[1:-1], "w+") as f:
f.write(''.join(args[1:])[1:-1])
def append(args):
with open(str(args[0])[1:-1], "a+") as f:
f.write(''.join(args[1:])[1:-1])
def read(args):
with open(str(args[0])[1:-1], "r", encoding="utf-8") as f:
return f.read()
def _values():
return {"write": None, "append": None, "read": None} | UTF-8 | Python | false | false | 356 | py | 24 | FileIO.py | 15 | 0.564607 | 0.519663 | 0 | 14 | 24.5 | 59 |
cpyip5/PythonCode | 7,541,962,575,565 | 49790d8725ca8ddd9b3f34cd6228b43da6b4dbbc | 07ab93fc3f84f7307587f1a515b17194b05446d7 | /PythonCode/Playground.py | f3000fd50338c02b6963d8ab8a87549bb102ef11 | []
| no_license | https://github.com/cpyip5/PythonCode | 31d629fd10bb0590ff6c8475549f35744a0fedf4 | 32c0bc9b96172fb3c497bbd9433649fcd56b1763 | refs/heads/master | 2020-06-01T06:45:08.368143 | 2019-09-20T06:25:42 | 2019-09-20T06:25:42 | 190,684,895 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
print(type(tf))
| UTF-8 | Python | false | false | 45 | py | 5 | Playground.py | 4 | 0.688889 | 0.688889 | 0 | 4 | 10.25 | 23 |
zyn-thakur/hms-1 | 14,362,370,666,277 | cd4f8e43e2aba4f266bece1464f6ac752a798860 | 4ce2105208b97e6077f6e81f4b3d0416c27fc1f5 | /copyFrontend.py | 1b205d7bebc66d68705e3a4e90f1eafca59b60f3 | []
| no_license | https://github.com/zyn-thakur/hms-1 | 48bbba640f8409f178bdf5673770cc4da31dc00a | 1af7921317360cc2d736712ae11661b9a2f1ac81 | refs/heads/master | 2020-12-05T22:19:40.130230 | 2020-01-07T06:39:02 | 2020-01-07T06:39:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os,shutil
def copytree(src, dst, symlinks=False, ignore=None):
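    # Copy every entry of src into dst, recursing into subdirectories and copying files over existing ones.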
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
if os.path.exists(d):
shutil.copy2(s, d)
else:
print("Can not delete the file as it doesn't exists")
shutil.copy2(s, d)
shutil.rmtree('apilogin/login/templates')
os.mkdir('apilogin/login/templates')
copytree('frontend/target/dist/','apilogin/login/templates')
print("Done")
| UTF-8 | Python | false | false | 618 | py | 19 | copyFrontend.py | 18 | 0.588997 | 0.585761 | 0 | 19 | 31.526316 | 69 |
knbk/web-technology | 6,287,832,146,050 | 00b28161ec53ef01236f1cf0138cb2ae8b5c9d91 | 892f3842816f0cefd69c8dc1619c289cc90b9b78 | /accounts/urls.py | 4a1458d1b48e8f437e347d0cb8664c5560b7c9f3 | []
| no_license | https://github.com/knbk/web-technology | 3145fc6e856d41e60529d0ef8f2280aa75516fda | a07f027ab3094e80355b09dbb1e3dd93bca367b3 | refs/heads/master | 2021-01-12T02:05:37.144117 | 2017-11-14T16:34:55 | 2017-11-14T16:34:55 | 78,470,958 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from . import views
import django.contrib.auth.views
urlpatterns = [
url(r'^login/$', django.contrib.auth.views.login, name='login'),
url(r'^logout/$', django.contrib.auth.views.logout, name='logout', kwargs={'next_page': '/'}),
url(r'^register/$', views.register, name='register'),
]
| UTF-8 | Python | false | false | 332 | py | 21 | urls.py | 11 | 0.677711 | 0.677711 | 0 | 10 | 32.2 | 98 |
felixdittrich92/DeepLearning-tensorflow-keras | 249,108,142,281 | 14e06da90ab3c2119c59b21793d429ad445e24c6 | 427a4c3ed88246a1727f58571788d90996065af7 | /8_DeepLearning-Autoencoder/Denoise_images/denoise_images.py | c4f881b3b23c9cfb8bdf5f8b2493bc263ff594d1 | [
"Apache-2.0"
]
| permissive | https://github.com/felixdittrich92/DeepLearning-tensorflow-keras | 09d2010f7d2a36befc14e2f0d07ca053c7329ba8 | 2880d8ed28ba87f28851affa92b6fa99d2e47be9 | refs/heads/master | 2022-12-30T11:46:22.729390 | 2020-10-22T11:56:22 | 2020-10-22T11:56:22 | 248,968,257 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Fix CuDnn problem
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError as e:
print(e)
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from mnistData import *
# Load MNIST dataset
data = MNIST()
x_train, _ = data.get_train_set()
x_test, _ = data.get_test_set()
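# Corrupt the inputs with additive Gaussian noise; the autoencoder learns to reconstruct the clean digits.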
x_train_noise = x_train + 0.1 * np.random.normal(size=x_train.shape)
x_test_noise = x_test + 0.1 * np.random.normal(size=x_test.shape)
plt.imshow(x_test_noise[0].reshape(28,28), cmap="gray")
plt.show()
# Encoded dimension (defined for reference; unused by the convolutional model below)
encoding_dim = 128
# Keras Model: Autoencoder
# Input Tensors
input_img = Input(shape=(28,28,1,))
# Encoder Part
x = Conv2D(8, kernel_size=3, activation="relu", padding="same")(input_img) # 28x28x8
x = MaxPooling2D(padding="same")(x) # 14x14x8
x = Conv2D(4, kernel_size=3, activation="relu", padding="same")(x) # 14x14x4
x = MaxPooling2D(padding="same")(x) # 7x7x4
encoded = Conv2D(2, kernel_size=3, activation="relu", padding="same")(x) # 7x7x2
# Decoder Part
x = Conv2D(4, kernel_size=3, activation="relu", padding="same")(encoded) # 7x7x4
x = UpSampling2D()(x) # 14x14x4
x = Conv2D(8, kernel_size=3, activation="relu", padding="same")(x) # 14x14x8
x = UpSampling2D()(x) # 28x28x8
# tanh works better here!
decoded = Conv2D(1, kernel_size=3, activation="tanh", padding="same")(x) # 28x28x1
# Output Tensors
autoencoder = Model(inputs=input_img, outputs=decoded)
# Training
autoencoder.compile(optimizer="adam", loss="mse")
autoencoder.fit(x_train_noise, x_train,
epochs=10,
batch_size=256,
validation_data=(x_test_noise, x_test))
# Testing
test_images = x_test_noise[:10]
decoded_imgs = autoencoder.predict(test_images)
# Plot test images
plt.figure(figsize=(12,6))
for i in range(10):
# Original image
ax = plt.subplot(2 , 10, i+1)
plt.imshow(test_images[i].reshape(28,28), cmap="gray")
# Decoded image
ax = plt.subplot(2 , 10, i+1+10)
plt.imshow(decoded_imgs[i].reshape(28,28), cmap="gray")
plt.show() | UTF-8 | Python | false | false | 2,235 | py | 70 | denoise_images.py | 66 | 0.685011 | 0.634004 | 0 | 72 | 30.055556 | 84 |
romanukov/news-parser | 17,987,323,066,523 | 9d5db64f08b800e0b18d54e55a8276194e1d8c38 | a7d0cf254f31f26eb6cc50289c3cd217efeb4a5e | /backend/src/telegram_watcher/migrations/0025_auto_20190215_1432.py | c36d07364fbe08126472f779048718c17b942b78 | []
| no_license | https://github.com/romanukov/news-parser | a6581b10f8480442e0aa5a24a45fdac02a1c5040 | 5430a350a44f22612a38d33c13be75c73a31bbde | refs/heads/master | 2023-01-11T22:26:44.243062 | 2020-04-09T14:51:56 | 2020-04-09T14:51:56 | 254,399,603 | 0 | 1 | null | false | 2023-01-04T14:53:45 | 2020-04-09T14:49:55 | 2020-04-09T14:52:04 | 2023-01-04T14:53:45 | 4,486 | 0 | 1 | 65 | Python | false | false | # Generated by Django 2.0.3 on 2019-02-15 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('telegram_watcher', '0024_auto_20190215_1428'),
]
operations = [
migrations.AddIndex(
model_name='messagefile',
index=models.Index(fields=['message'], name='telegram_wa_message_c5a656_idx'),
),
]
| UTF-8 | Python | false | false | 411 | py | 154 | 0025_auto_20190215_1432.py | 112 | 0.620438 | 0.53528 | 0 | 17 | 23.176471 | 90 |