Dataset schema (one row per source file):

| column | dtype | range |
|---|---|---|
| repo_name | stringlengths | 7–111 |
| __id__ | int64 | 16.6k–19,705B |
| blob_id | stringlengths | 40–40 |
| directory_id | stringlengths | 40–40 |
| path | stringlengths | 5–151 |
| content_id | stringlengths | 40–40 |
| detected_licenses | list | |
| license_type | stringclasses | 2 values |
| repo_url | stringlengths | 26–130 |
| snapshot_id | stringlengths | 40–40 |
| revision_id | stringlengths | 40–40 |
| branch_name | stringlengths | 4–42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k–687M ⌀ |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | stringclasses | 12 values |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0–10.2M ⌀ |
| gha_stargazers_count | int32 | 0–178k ⌀ |
| gha_forks_count | int32 | 0–88.9k ⌀ |
| gha_open_issues_count | int32 | 0–2.72k ⌀ |
| gha_language | stringlengths | 1–16 ⌀ |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | stringlengths | 10–2.95M |
| src_encoding | stringclasses | 5 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10–2.95M |
| extension | stringclasses | 19 values |
| num_repo_files | int64 | 1–202k |
| filename | stringlengths | 4–112 |
| num_lang_files | int64 | 1–202k |
| alphanum_fraction | float64 | 0.26–0.89 |
| alpha_fraction | float64 | 0.2–0.89 |
| hex_fraction | float64 | 0–0.09 |
| num_lines | int32 | 1–93.6k |
| avg_line_length | float64 | 4.57–103 |
| max_line_length | int64 | 7–931 |
Sandy4321/CS784-Project | 8,650,064,138,633 | 128f5f989d1a5771578e6507748f69172a119233 | 74dcbb1d3fe56c57cf0d9c0846b69fe949119758 | /information_extraction.py | fdce77cf8915ecb62c2f4b5e5dceef3e36354306 | [] | no_license | https://github.com/Sandy4321/CS784-Project | 1e0a25525c3d9976c1ec313225ea249fe215ce71 | 9cc1be0a8f47ff6f3042961f893728f8441ff8ee | refs/heads/master | 2021-01-13T04:09:03.662740 | 2016-05-11T22:58:41 | 2016-05-11T22:58:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'wintere'
import csv
import lxml
import datrie
import re
import string
import sys
from collections import defaultdict
from bs4 import BeautifulSoup
import pickle
class InformationExtractor:
def __init__(self):
#initialize brand trie
brand_trie = datrie.Trie(string.printable)
syn_dict = dict()
with open('big_dict.csv', 'r', encoding='latin-1') as brand_dict_csv_file:
brand_dict_reader = csv.reader(brand_dict_csv_file, delimiter=',', quotechar='"')
for brand in brand_dict_reader:
b_name = brand[0]
b_name = b_name.title()
# Add permutations of the brand name to our trie object.
# These will help us catch things like different character casing, spacing, etc.
smushed = ''.join(b_name.split(' '))
dehyphen = ''.join(b_name.split('-'))
permutations = [smushed.upper(),dehyphen.upper()]
for permutation in permutations:
brand_trie[permutation] = int(brand[1])
#add a key, value pair that links transformed brand name to the original
syn_dict[permutation] = b_name
# Also record the frequency of each brand name to our trie
brand_trie[b_name] = int(brand[1])
self.brand_trie = brand_trie
self.syn_dict = syn_dict
#color dict
self.colors = []
with open('colors.txt','r') as c_file:
for c in c_file.readlines():
self.colors.append(c.strip('\n'))
self.longd_tfidf = pickle.load(open("tfidf_longd.p", "rb"))
self.pname_tfidf = pickle.load(open("tfidf_pname.p", "rb"))
def text_from_html(self, description):
if len(description) < 5:
return description.lower()
try:
html = BeautifulSoup(description, "lxml")
# html = BeautifulSoup(description)
text = html.getText(' ')
if text is None:
return description.lower()
else:
text = re.sub(r'[^\x00-\x7F]+',' ', text)
return text.lower()
except UserWarning:
return description.lower()
def standardizer(self,string):
brand_dict = {'cables to go': 'c2g', 'startech.com': 'startech', 'pny technologies': 'pny', 'everki usa inc':'everki','rubbermaid home': 'rubbermaid', 'tripp-lite':'tripp lite', 'hewlett packard':'hp', 'buffalo technology':'buffalo', 'officemate international corp':'officemate', 'phillips monitors':'phillips', 'pyle audio':'pyle'}
for key, value in brand_dict.items():
if key in string:
string = string.replace(key, value)
return string
# a cheap haaaack
def brand_adjuster(self, d, ld=False):
if ld:
for entry in ['brand', 'product name', 'manufacturer', 'product short description', 'product long description', 'brand name']:
if entry in d:
val = d[entry]
val = self.standardizer(val)
d[entry] = val
else:
for entry in ['brand', 'product name', 'manufacturer', 'product short description', 'product long description', 'brand name']:
if entry in d:
val = d[entry][0]
val = self.standardizer(val)
d[entry] = [val]
return d
def color_from_name(self, product_name):
colors = []
product_name = product_name.lower()
product_name_list = product_name.split()
for i in product_name_list:
if i in self.colors:
if i not in colors:
colors.append(i)
return colors
def brand_from_string(self, product_name):
product_name = product_name.upper()
        product_name = re.sub(r'[|()]', ' ', product_name)  # strip pipes and parentheses
substrings = product_name.split(' ')
s_array = []
        # Because a prefix lookup only recognizes matches at the beginning of a string, divide the string
        # into its suffixes to recognize brand names anywhere in the string
for i in range(len(substrings) - 1):
s_array.append(' '.join(substrings[i:]))
# Identify which strings are candidates for brand name
cands = []
for substring in s_array:
# Get candidates from prefix tree
cand = self.brand_trie.prefixes(substring)
final_cands = set()
for c in cand:
sub = False
for st in substrings:
if (c in st) and (len(c) < len(st)):
sub = True
                    # Remove candidates that are less than 1 word (i.e. 'Sm' for 'Smart Technologies')
if (c == st):
break
if sub == False:
                    # Return regularized versions of 'synonyms', i.e. map 'COOLERMASTER' back to 'Cooler Master'
if c in self.syn_dict:
final_cands.add(self.syn_dict[c])
else:
final_cands.add(c)
if len(cand) > 0:
#Add acceptable candidates to final list
cands.extend(final_cands)
# Select the longest candidate at the earliest index of all candidates
chosen = ""
candindex = defaultdict(list)
lower_name = product_name.lower()
for candidate in cands:
index = lower_name.find(candidate.lower())
candindex[index].append(candidate)
if len(candindex) > 0:
min_index = min(candindex)
chosen = candindex[min_index][0]
for candidate in candindex[min_index]:
if len(candidate) > len(chosen):
chosen = candidate
return chosen.lower()
#moved from feature_operations for better modularity
def unitsFromString(self, tokens):
measurement_units = ['khz', 'mhz', 'ghz', 'watt', 'nm', 'um', 'mm', 'cm', 'm', 'km', 'ft', 'in', 's', 'ms', 'mb', 'gb', 'tb', 'gb/s', 'mb/s', 'mbps', 'awg', 'a', 'w', 'g', 'lb', 'dba', 'cfm', 'rpm', 'amp', 'mah', 'watts', 'vac','nits','volts','inches','pounds','ounces','lbs']
units = []
for index in range(0, len(tokens)):
token = tokens[index].lower()
# Look for units split across multiple tokens
if re.match("^[0-9\.]+$", token):
if index < len(tokens) - 1:
nextToken = str(tokens[index + 1]).lower().replace(".", "")
if nextToken in measurement_units:
unit_value = re.sub(r'\.[0]+', "", token) # Remove any trailing decimal points + 0s
# print("Token=" + str(token) + ", unit value=" + str(unit_value))
units.append(str(unit_value + " " + nextToken))
# Also look for units compacted into a single token
elif re.match("^[0-9\.]+(\s)*[a-z\./]+$", token):
unit_data = re.match("^([0-9\.]+)[\s]*([a-z\./]+)$", token)
if str(unit_data.groups(0)[1]) in measurement_units:
unit_value = re.sub(r'\.[0]+', "",
unit_data.groups(0)[0]) # Remove any trailing decimal points + 0s
# print("Token=" + str(token) + ", unit value=" + str(unit_value))
units.append(str(unit_value) + " " + str(unit_data.groups(0)[1]))
return units
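
# Hedged usage sketch (not part of the original file): unitsFromString is the only
# method that needs no data files, so it can be demoed standalone (the constructor
# requires big_dict.csv, colors.txt and the pickled tf-idf files).
# ie = InformationExtractor()
# ie.unitsFromString("Kingston 8 GB 1600MHz DDR3".split())  # -> ['8 gb', '1600 mhz']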
| UTF-8 | Python | false | false | 7,770 | py | 16 | information_extraction.py | 12 | 0.516216 | 0.511712 | 0.000257 | 168 | 44.25 | 340 |
JaJasiok/akai-rekrutacja | 18,708,877,572,381 | 04a339775490632d249090c25b4d395b6a3dd7a0 | a4f12f6a375eb7cda44aaa5aa988fbe7c7b5863d | /python/tasker/src/json/Exporter.py | 4efd2b93715bb2f02ffcb5917092828491d9a0c0 | [] | no_license | https://github.com/JaJasiok/akai-rekrutacja | 3429486ab4a0e976856d579897653fdb4d95c4d1 | 7979a36ce0cded43cac4dffb849832470acc28e9 | refs/heads/main | 2023-08-26T10:01:48.280274 | 2021-11-06T20:47:20 | 2021-11-06T20:47:20 | 425,337,653 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
class Exporter:
def __init__(self):
pass
def save_tasks(self, tasks):
        # TODO: save the tasks to a file here
with open("taski.json", "w") as f:
json.dump(tasks, f, indent = 4)
f.close()
| UTF-8 | Python | false | false | 250 | py | 6 | Exporter.py | 4 | 0.536 | 0.532 | 0 | 13 | 18.230769 | 43 |
wbrs-codestellation-2018/audio-recognizer | 14,405,320,339,108 | 877431f4f723f5008cf0f23e30534b1fb2139d85 | f1bf06136897041f43a24a5e46a7e02b45b895c9 | /setup.py | dbb7d24dc675c8b62a907c9952f2d3434abf8f47 | [] | no_license | https://github.com/wbrs-codestellation-2018/audio-recognizer | 36e2d09047aef079ec9911c1ddda5c2ad3b6937a | 55e3d0feb9e171d5dcd156fcb069595623bdaf6f | refs/heads/master | 2020-04-05T16:27:57.967779 | 2018-11-10T20:54:06 | 2018-11-10T20:54:06 | 157,013,804 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
setup(name='audio_recognizer',
version='0.1',
description='Gets genre info from files',
url='https://github.com/wbrs-codestellation-2018/audio-recognizer',
author='Sam Stern',
author_email='sternj@brandeis.edu',
license='MIT',
packages=['audio_recognizer'],
dependency_links=['https://github.com/acrcloud/acrcloud_sdk_python/tarball/master'],
zip_safe=False) | UTF-8 | Python | false | false | 441 | py | 2 | setup.py | 1 | 0.678005 | 0.664399 | 0 | 11 | 39.181818 | 90 |
Kunstenpunt/havelovewilltravel | 17,497,696,786,688 | f15d727788cc2c023fccf4b57e7f639a4c074680 | c1c620a25217d979c6926a3377862e314f6593f6 | /hlwtadmin/migrations/0043_auto_20210126_0833.py | 4dc56cc86658a0f3c11c38a60713006d041685ff | [
"Apache-2.0"
] | permissive | https://github.com/Kunstenpunt/havelovewilltravel | ab47ed097c015c8243412d8d5b375837fb8f8232 | f7ab8b6c952f90e6688f5e021245aa3625c8e491 | refs/heads/master | 2022-12-10T10:00:53.351251 | 2022-05-17T08:20:10 | 2022-05-17T08:20:10 | 237,963,083 | 1 | 0 | Apache-2.0 | false | 2022-12-08T03:33:00 | 2020-02-03T12:45:54 | 2022-02-18T20:31:45 | 2022-12-08T03:32:59 | 104,450 | 1 | 0 | 23 | Python | false | false | # Generated by Django 3.0.7 on 2021-01-26 08:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hlwtadmin', '0042_auto_20201203_1116'),
]
operations = [
migrations.AlterField(
model_name='concert',
name='date',
field=models.DateField(blank=True, db_index=True, null=True),
),
migrations.AlterField(
model_name='historicalconcert',
name='date',
field=models.DateField(blank=True, db_index=True, null=True),
),
]
| UTF-8 | Python | false | false | 595 | py | 182 | 0043_auto_20210126_0833.py | 150 | 0.581513 | 0.529412 | 0 | 23 | 24.869565 | 73 |
flyhigher139/geektime_ebook_maker | 12,034,498,398,048 | 3b5e5492272bfbc140b90896bc75fc35daee12dd | 1df13f93dbedaefc1cdfd0caf6085d6027120094 | /geektime_dl/data_client/__init__.py | adf802368201b12dddd909e8724fe6db0b5c5261 | [] | no_license | https://github.com/flyhigher139/geektime_ebook_maker | da88a0f4422fcb52bdb54a8c554628cf2f40ba5b | 9d92e6b55411a6bc291ff1aa9c32fdf846001fca | refs/heads/master | 2021-06-11T09:04:32.665398 | 2021-06-02T10:49:14 | 2021-06-02T10:49:14 | 152,719,193 | 5 | 1 | null | true | 2021-06-02T10:49:15 | 2018-10-12T08:32:19 | 2021-04-22T02:01:02 | 2021-06-02T10:49:14 | 410 | 5 | 1 | 0 | Python | false | false | # coding=utf8
import json
import os
import functools
import threading
import time
import atexit
from tinydb import TinyDB, Query
from tinydb.storages import JSONStorage
from tqdm import tqdm
from geektime_dl.data_client.gk_apis import GkApiClient
from geektime_dl.utils import synchronized
def _local_storage(table: str):
"""
    Load and store course/post content in local storage.
"""
def decorator(func):
@functools.wraps(func)
def wrap(self: 'DataClient', *args, **kwargs):
nonlocal table
force = kwargs.get('force')
_id = kwargs.get('{}_id'.format(table)) or args[0]
collection = Query()
data = None
if not force:
res = self.db.table(table).search(collection.id == _id)
if res:
data = res[0]
if data is None:
data = func(self, *args, **kwargs)
self.db.table(table).upsert(data, collection.id == _id)
return data
return wrap
return decorator
class DataClient:
def __init__(self, gk: GkApiClient, db: TinyDB):
self._gk = gk
self.db = db
        self._lock = threading.Lock()  # TinyDB is not thread-safe
@property
def gk(self):
return self._gk
def get_course_list(self, **kwargs) -> dict:
"""
        Fetch the course list.
"""
return self._gk.get_course_list()
@synchronized()
@_local_storage('course')
def get_course_intro(self, course_id: int, **kwargs) -> dict:
"""
        Fetch the intro of a course.
"""
data = self._gk.get_course_intro(course_id)
return data
@synchronized()
@_local_storage('post')
def get_post_content(self, post_id: int, **kwargs) -> dict:
"""
        Fetch the full content of a post, including its comments.
"""
data = self._gk.get_post_content(post_id)
data['comments'] = self._get_post_comments(post_id)
return data
def _get_post_comments(self, post_id: int) -> list:
"""
        Fetch the comments of a post.
"""
data = self._gk.get_post_comments(post_id)
for c in data:
c['replies'] = json.dumps(c.get('replies', []))
return data
def get_course_content(self, course_id: int, force: bool = False,
pbar=True, pbar_desc='') -> list:
"""
        Fetch the content of every post in the course with ID course_id.
"""
posts = []
post_ids = self._gk.get_post_list_of(course_id)
if pbar:
post_ids = tqdm(post_ids)
post_ids.set_description(pbar_desc)
for post in post_ids:
post_detail = self.get_post_content(post['id'], force=force)
posts.append(post_detail)
return posts
def get_video_collection_list(self, **kwargs) -> list:
"""
        Fetch the list of 每日一课 (daily-lesson) video collections.
"""
return self._gk.get_video_collection_list()
@synchronized()
@_local_storage('video-collection')
def get_video_collection_intro(self, collection_id: int, **kwargs) -> dict:
"""
        Fetch the intro of a daily-lesson collection.
"""
data = self._gk.get_video_collection_intro(collection_id)
return data
@synchronized()
@_local_storage('daily')
def get_daily_content(self, video_id: int, **kwargs) -> dict:
"""
        Fetch the content of one daily-lesson video.
"""
data = self._gk.get_post_content(video_id)
return data
def get_video_collection_content(self, collection_id: int,
force: bool = False,
pbar=True, pbar_desc='') -> list:
"""
        Fetch every video in the daily-lesson collection with ID collection_id.
"""
data = []
v_ids = self._gk.get_video_list_of(collection_id)
if pbar:
v_ids = tqdm(v_ids)
v_ids.set_description(pbar_desc)
for v_id in v_ids:
v = self.get_daily_content(v_id['article_id'], force=force)
data.append(v)
return data
class _JSONStorage(JSONStorage):
"""
Store the data in a JSON file.
    Overrides JSONStorage to improve performance:
    1. read() serves from memory instead of re-reading the file
    2. write() flushes to disk at most once every SAVE_DELTA (10 s)
    3. on exit, flush to disk so that no data is lost
"""
SAVE_DELTA = 10
def __init__(self, path, create_dirs=False, encoding=None, **kwargs):
super().__init__(path, create_dirs, encoding, **kwargs)
self._data = super().read()
self._last_save = time.time()
atexit.register(self._register_exit)
def _register_exit(self):
super().write(self._data)
super().close()
def read(self) -> dict:
return self._data
def write(self, data) -> None:
self._data = data
now = time.time()
if now - self._last_save > self.SAVE_DELTA:
super().write(data)
self._last_save = now
def close(self):
pass
dc_global = None
def get_data_client(cfg: dict) -> DataClient:
global dc_global
if dc_global is not None:
return dc_global
gk = GkApiClient(
account=cfg['account'],
password=cfg['password'],
area=cfg['area'],
no_login=cfg['no_login']
)
f = os.path.expanduser(
os.path.join(cfg['output_folder'], 'geektime-localstorage.json'))
db = TinyDB(f, storage=_JSONStorage)
dc = DataClient(gk, db)
dc_global = dc
return dc
| UTF-8 | Python | false | false | 5,545 | py | 23 | __init__.py | 19 | 0.540413 | 0.53852 | 0 | 201 | 25.283582 | 79 |
danbok/city-places | 5,385,889,001,188 | 048c01127381fa87a0d527201341312b32b5d10e | ce6337c0faef6f2b3a6635759a21ccf7ec3c2a87 | /custom_auth_system/views.py | fa68572a6b0ebb73aec17e9c88ee6a7c3da089a4 | [] | no_license | https://github.com/danbok/city-places | 1e86c2c720c0f2a1b4d6a7580fe342e6bcec333a | 4a545789dd511014c71622c77c10322e808d6752 | refs/heads/master | 2016-09-22T22:02:10.326239 | 2016-06-16T06:12:23 | 2016-06-16T06:12:23 | 61,266,361 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import logout
from django.shortcuts import render
def logout_user(request):
logout(request)
return render(request, template_name='static_pages/index.html')
| UTF-8 | Python | false | false | 191 | py | 34 | views.py | 20 | 0.774869 | 0.774869 | 0 | 7 | 26.285714 | 67 |
jgera/Semantic-WoT-Environment-Simulation | 16,320,875,752,657 | 63503dd158fb12a60a0528a4e2ccf8c086129caf | 491b7696660f1f39f9cbcfcf6e4d1cfec9066630 | /test/netuse/test_devices.py | 05044b97786e83e3dbc9a9c1cd8c69f50d188df1 | [
"Apache-2.0"
] | permissive | https://github.com/jgera/Semantic-WoT-Environment-Simulation | 16cb940f3decefae352f048e78e5faca1c2fd94c | a91af1a65036b62a42ce1ae74b652ee2a5a7e651 | refs/heads/master | 2020-12-31T02:32:12.780345 | 2013-12-13T16:18:04 | 2013-12-13T16:18:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
'''
Copyright (C) 2012 onwards University of Deusto
All rights reserved.
This software is licensed as described in the file COPYING, which
you should have received as part of this distribution.
This software consists of contributions made by many individuals,
listed below:
@author: Aitor Gómez Goiri <aitor.gomez@deusto.es>
'''
import unittest
from mock import Mock, patch
from netuse.devices import XBee
def side_effect(*args):
return args[0] + args[1]/2 # just to check how to configure different returns
rndMock = Mock()
rndMock.normalvariate.side_effect = side_effect
#rndMock.normalvariate.return_value = 0
class TestDeviceType(unittest.TestCase):
#def setUp(self):
def getMockedDevice(self, device):
resources = Mock()
resources.capacity = 10
resources.n = 0
device._DeviceType__resources = resources
return device
@patch('netuse.results.G.Rnd', rndMock) # new global unrandomized variable
def test_get_time_needed_to_answer(self):
dev = self.getMockedDevice(XBee())
#self.assertEquals(779.0, dev.getTimeNeededToAnswer())
        self.assertTimeNeeded(dev,1,0,(77,1)) # 1 resource being used (me!)
self.assertTimeNeeded(dev,5,0,(392,8)) # 5 resources being used
self.assertTimeNeeded(dev,7,2,(392,8)) # 5 resources being used
self.assertTimeNeeded(dev,10,0,(775.0,8.0)) # 10 resources being used
self.assertTimeNeeded(dev,20,10,(775.0,8.0)) # 10 resources being used
self.assertTimeNeeded(dev,2,0,(155.75,2.75)) # =(392-77)/4*1 +77
self.assertTimeNeeded(dev,3,0,(234.5,4.5)) # =(392-77)/4*2 +77
self.assertTimeNeeded(dev,4,0,(313.25,6.25)) # =(392-77)/4*3 +77
self.assertTimeNeeded(dev,6,0,(468.6,8.0)) # =(775-392)/4*1 +392
self.assertTimeNeeded(dev,7,0,(545.2,8.0))
self.assertTimeNeeded(dev,8,0,(621.8,8.0))
self.assertTimeNeeded(dev,9,0,(698.4,8.0))
def assertTimeNeeded(self, device, capacity, free_resources, expectParameters):
device.get_time_needed_to_answer(capacity - free_resources)
rndMock.normalvariate.assert_called_with(expectParameters[0], expectParameters[1]) # for 10 concurrent requests
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 2,360 | py | 112 | test_devices.py | 107 | 0.659178 | 0.590928 | 0 | 64 | 35.875 | 119 |
nurshahjalal/PyAPI | 11,665,131,179,256 | f32a86a337c63348e1a376061694109008f3e6b8 | 22553d5e0644f5ea1395b38684f5b8b928b0d983 | /src/utilities/requestsUtilities.py | 0dc9f7ec08256befea14d718e02681b1f1ae9d12 | [] | no_license | https://github.com/nurshahjalal/PyAPI | e19df1dc7086ae51d400a4db80378e453bf24970 | 549c4a3c4e8033975a0f5d0f66704c2c92a2431e | refs/heads/main | 2023-02-07T05:56:46.279428 | 2020-12-22T05:58:21 | 2020-12-22T05:58:21 | 323,529,723 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import os
from requests_oauthlib import OAuth1
from src.configs.host_config import API_HOST
from src.utilities.credentialUtility import CredentialUtility
import logging as logger
class RequestUtility(object):
def __init__(self):
        # get the ENV variable from the environment; if ENV is not set, default to "test"
self.env = os.environ.get("ENV", "test")
self.baseURL = API_HOST[self.env]
        # get_api_keys is a static method, so the class does not need to be instantiated and no self is needed
api_creds = CredentialUtility.get_api_keys()
self.auth = OAuth1(api_creds["CLIENT_KEY"], api_creds["CLIENT_SECRET"])
def get(self):
pass
    def check_status_code(self, res_status_code, expected_status_code):
        assert res_status_code == expected_status_code, \
            f"Expected status code {expected_status_code} but actual {res_status_code}"
def post(self, endpoint, payload=None, headers=None, expected_status_code=200):
post_url = self.baseURL + endpoint
if not headers:
headers = {
"Content-Type": "Application/json"
}
post_response = requests.post(url=post_url, data=payload, headers=headers, auth=self.auth)
res_status_code = post_response.status_code
        self.check_status_code(res_status_code, expected_status_code)
        rs_json = post_response.json()
        logger.debug(f"Response Json : \n {rs_json} \n ########")
        logger.debug(f'Response Status Code {res_status_code} ')
        # assert post_response.status_code == int(expected_status_code), \
        #     f"Expected status code {expected_status_code} but actual {post_response.status_code}"
        return rs_json
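
# Hypothetical usage (endpoint, payload and status code invented for illustration):
# ru = RequestUtility()
# created = ru.post('products', payload='{"name": "pen"}', expected_status_code=201)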
| UTF-8 | Python | false | false | 1,755 | py | 9 | requestsUtilities.py | 7 | 0.659259 | 0.65641 | 0 | 45 | 38 | 99 |
tingxin/SeleniumProjectTemplate | 9,955,734,232,219 | 05c1c68b39f6e8b29e918efcfa6976717c31d1e7 | 54651258c3e8302fe0036d8e12c3595468cc54f7 | /cases/workflow.py | 065dbed84cda1597b7fdafa348f647fc3bceccdc | [] | no_license | https://github.com/tingxin/SeleniumProjectTemplate | 60e5b2db88c6e7472d9c0cab5f852c97d2964b2c | d182aa48a1c45214968afb040b03ae159fa31e03 | refs/heads/master | 2020-06-10T17:16:43.876480 | 2016-12-08T09:55:52 | 2016-12-08T09:55:52 | 75,923,970 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from common.testcases import TestCases
from selenium.webdriver.common.keys import Keys
import time
class WorkflowTestCases(TestCases):
def test_search_many_times(self):
print("Test searching in DevNet")
driver = self.driver
test_keys = ["deviot", "cmx", "aci", "sdn"]
for i in range(0, len(test_keys)):
driver.get("https://developer.cisco.com/site/devnet/home/index.gsp")
self.assertIn("Cisco Devnet", driver.title)
elem = driver.find_element_by_name("q")
elem.send_keys(test_keys[i])
elem.send_keys(Keys.RETURN)
assert "Cisco DevNet: DevNetCreations - DevIoT" not in driver.page_source
time.sleep(1)
| UTF-8 | Python | false | false | 725 | py | 8 | workflow.py | 5 | 0.630345 | 0.627586 | 0 | 19 | 37.105263 | 85 |
RomAzhnak/Python | 5,858,335,420,933 | 6b84e9ef0360b1eb49d28fe49a46f14507f6d6ee | ad9f3148f0a7d0205df7703ed92fb0650b5a7784 | /socket_client_tcp.py | c13f05d55311a14ce2c2e1408ffb738981e2e0be | [] | no_license | https://github.com/RomAzhnak/Python | 43e6dd4e721a1a33ff8c4d6ea02c1a620c25f08b | ad6cc5232920687d795c49047b0fd0a6143443b6 | refs/heads/master | 2023-06-07T11:44:26.960118 | 2021-06-23T18:31:07 | 2021-06-23T18:31:07 | 369,824,867 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 8888))
msg = input("Message: ")
msg = msg.encode(encoding="utf-8")
# msg = bytearray(msg, 'UTF-8')
sock.send(msg)
sock.close()
| UTF-8 | Python | false | false | 235 | py | 8 | socket_client_tcp.py | 7 | 0.659574 | 0.608511 | 0 | 9 | 24.111111 | 56 |
Guin-/translator | 2,018,634,673,370 | 8deefc1b926dad5200939e4554f0ecaba99eb66e | ffdc3270b715c267c7731e323395e26fec0f8865 | /translator/backend/serializers.py | 9dcfd3f1d048bdffb4540d5605fcb649182ec02a | [] | no_license | https://github.com/Guin-/translator | 2d4521395ba6822a9e1efd99dc3d819986b80063 | b46b27333a49caf7ee454a07ebb38ec797c1dbd5 | refs/heads/master | 2021-01-20T18:58:50.248205 | 2016-08-23T16:19:02 | 2016-08-23T16:19:02 | 63,825,533 | 1 | 0 | null | false | 2016-08-23T16:19:03 | 2016-07-21T01:07:54 | 2016-07-21T04:26:35 | 2016-08-23T16:19:02 | 26 | 0 | 0 | 3 | Python | null | null | from rest_framework import serializers
from .models import Translation
class TranslationSerializer(serializers.ModelSerializer):
language = serializers.CharField(read_only=True)
translation = serializers.CharField(read_only=True)
class Meta:
model = Translation
fields = ('id', 'input_text', 'language', 'translation', 'timestamp')
| UTF-8 | Python | false | false | 363 | py | 17 | serializers.py | 11 | 0.732782 | 0.732782 | 0 | 10 | 35.2 | 77 |
gabriellaec/desoft-analise-exercicios | 7,842,610,298,164 | 96d65390c3bdc7a4b198015c461270e40a32f8c7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_301/ch14_2019_03_14_22_52_38_250667.py | e57d18830c507521c0edfb04dbfc36481deb3995 | [] | no_license | https://github.com/gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def calcula_volume_da_esfera(R):
    import math  # needed for math.pi
    y = (4 * math.pi * R**3) / 3  # sphere volume is (4/3) * pi * R^3
return y | UTF-8 | Python | false | false | 70 | py | 35,359 | ch14_2019_03_14_22_52_38_250667.py | 35,352 | 0.614286 | 0.571429 | 0 | 3 | 22.666667 | 32 |
lionheartStark/GCN-graduate-design | 18,227,841,211,090 | 80b4e06edbe419e2af94df349bfcc684e06e35d0 | dd4316123277f904adc1da5e1869e57bca144b71 | /Elliptic_dataset_GCN.py | 5fb87ee062709bbe4207d05e1988bbc38ba8b1b1 | [
"MIT"
] | permissive | https://github.com/lionheartStark/GCN-graduate-design | 2b98649d9669ac5ff5f8683530801fd08c7f26a5 | a0da3bda3eaf2e9b447915dbc968dc21cfe07639 | refs/heads/main | 2023-03-13T14:54:22.570837 | 2021-03-02T01:16:07 | 2021-03-02T01:16:07 | 343,096,655 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/JungWoo-Chae/GCN_Elliptic_dataset/blob/main/Elliptic_dataset_GCN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Bitcoin Fraud Detection System with GCN
# ## Pytorch Geometric Environment Setting
# In[ ]:
# Install required packages.
# !pip install -q torch-scatter==latest+cu102 -f https://pytorch-geometric.com/whl/torch-1.7.0.html
# !pip install -q torch-sparse==latest+cu102 -f https://pytorch-geometric.com/whl/torch-1.7.0.html
# !pip install -q git+https://github.com/rusty1s/pytorch_geometric.git
# ## Library Import
# In[1]:
import numpy as np
import networkx as nx
import os
import pandas as pd
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Embedding
from torch.nn import Parameter
from torch_geometric.data import Data, DataLoader
from torch_geometric.nn import GCNConv,GATConv
from torch_geometric.utils.convert import to_networkx
from torch_geometric.utils import to_undirected
# In[2]:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# # **Please insert Kaggle username and kaggle key**
# In[3]:
# os.environ['KAGGLE_USERNAME'] = "@@@@@@@@@" # username from the json file
# os.environ['KAGGLE_KEY'] = "####################" # key from the json file
# !kaggle datasets download -d ellipticco/elliptic-data-set
# !unzip elliptic-data-set.zip
# !mkdir elliptic_bitcoin_dataset_cont
# ## Data Preparation
# In[4]:
# Load Dataframe
df_edge = pd.read_csv('elliptic_bitcoin_dataset/elliptic_txs_edgelist.csv')
df_class = pd.read_csv('elliptic_bitcoin_dataset/elliptic_txs_classes.csv')
df_features = pd.read_csv('elliptic_bitcoin_dataset/elliptic_txs_features.csv', header=None)
# Setting Column name
df_features.columns = ['id', 'time step'] + [f'trans_feat_{i}' for i in range(93)] + [f'agg_feat_{i}' for i in
range(72)]
print('Number of edges: {}'.format(len(df_edge)))
# ## Get Node Index
# In[5]:
all_nodes = list(
set(df_edge['txId1']).union(set(df_edge['txId2'])).union(set(df_class['txId'])).union(set(df_features['id'])))
nodes_df = pd.DataFrame(all_nodes, columns=['id']).reset_index()
print('Number of nodes: {}'.format(len(nodes_df)))
# ## Fix id index
# In[6]:
df_edge = df_edge.join(nodes_df.rename(columns={'id': 'txId1'}).set_index('txId1'), on='txId1', how='inner').join(
nodes_df.rename(columns={'id': 'txId2'}).set_index('txId2'), on='txId2', how='inner', rsuffix='2').drop(
columns=['txId1', 'txId2']).rename(columns={'index': 'txId1', 'index2': 'txId2'})
df_edge.head()
# In[7]:
df_class = df_class.join(nodes_df.rename(columns={'id': 'txId'}).set_index('txId'), on='txId', how='inner').drop(
columns=['txId']).rename(columns={'index': 'txId'})[['txId', 'class']]
df_class.head()
# In[8]:
df_features = df_features.join(nodes_df.set_index('id'), on='id', how='inner').drop(columns=['id']).rename(
columns={'index': 'id'})
df_features = df_features[['id'] + list(df_features.drop(columns=['id']).columns)]
df_features.head()
# In[9]:
df_edge_time = df_edge.join(df_features[['id', 'time step']].rename(columns={'id': 'txId1'}).set_index('txId1'),
on='txId1', how='left', rsuffix='1').join(
df_features[['id', 'time step']].rename(columns={'id': 'txId2'}).set_index('txId2'), on='txId2', how='left',
rsuffix='2')
df_edge_time['is_time_same'] = df_edge_time['time step'] == df_edge_time['time step2']
df_edge_time_fin = df_edge_time[['txId1', 'txId2', 'time step']].rename(
columns={'txId1': 'source', 'txId2': 'target', 'time step': 'time'})
# ## Create csv from Dataframe
# In[10]:
df_features.drop(columns=['time step']).to_csv('elliptic_bitcoin_dataset_cont/elliptic_txs_features.csv', index=False,
header=None)
df_class.rename(columns={'txId': 'nid', 'class': 'label'})[['nid', 'label']].sort_values(by='nid').to_csv(
'elliptic_bitcoin_dataset_cont/elliptic_txs_classes.csv', index=False, header=None)
df_features[['id', 'time step']].rename(columns={'id': 'nid', 'time step': 'time'})[['nid', 'time']].sort_values(
by='nid').to_csv('elliptic_bitcoin_dataset_cont/elliptic_txs_nodetime.csv', index=False, header=None)
df_edge_time_fin[['source', 'target', 'time']].to_csv('elliptic_bitcoin_dataset_cont/elliptic_txs_edgelist_timed.csv',
index=False, header=None)
# ## Graph Preprocessing
# In[11]:
node_label = df_class.rename(columns={'txId': 'nid', 'class': 'label'})[['nid', 'label']].sort_values(by='nid').merge(
df_features[['id', 'time step']].rename(columns={'id': 'nid', 'time step': 'time'}), on='nid', how='left')
node_label['label'] = node_label['label'].apply(lambda x: '3' if x == 'unknown' else x).astype(int) - 1
node_label.head()
# In[12]:
merged_nodes_df = node_label.merge(
df_features.rename(columns={'id': 'nid', 'time step': 'time'}).drop(columns=['time']), on='nid', how='left')
merged_nodes_df.head()
# In[13]:
train_dataset = []
test_dataset = []
for i in range(49):
nodes_df_tmp = merged_nodes_df[merged_nodes_df['time'] == i + 1].reset_index()
nodes_df_tmp['index'] = nodes_df_tmp.index
df_edge_tmp = df_edge_time_fin.join(
nodes_df_tmp.rename(columns={'nid': 'source'})[['source', 'index']].set_index('source'), on='source',
how='inner').join(nodes_df_tmp.rename(columns={'nid': 'target'})[['target', 'index']].set_index('target'),
on='target', how='inner', rsuffix='2').drop(columns=['source', 'target']).rename(
columns={'index': 'source', 'index2': 'target'})
x = torch.tensor(np.array(nodes_df_tmp.sort_values(by='index').drop(columns=['index', 'nid', 'label'])),
dtype=torch.float)
edge_index = torch.tensor(np.array(df_edge_tmp[['source', 'target']]).T, dtype=torch.long)
edge_index = to_undirected(edge_index)
mask = nodes_df_tmp['label'] != 2
y = torch.tensor(np.array(nodes_df_tmp['label']))
    if i + 1 < 35:  # time steps 1-34 form the train split, 35-49 the test split
data = Data(x=x, edge_index=edge_index, train_mask=mask, y=y)
train_dataset.append(data)
else:
data = Data(x=x, edge_index=edge_index, test_mask=mask, y=y)
test_dataset.append(data)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
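
# Optional sanity check (a sketch; assumes at least one graph per loader):
# batch = next(iter(train_loader))
# print(batch.num_nodes, batch.num_edges, int(batch.train_mask.sum()))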
# ## Model
# In[ ]:
class GCN(torch.nn.Module):
def __init__(self, num_node_features, hidden_channels, use_skip=False, conv1=GCNConv, conv2=GCNConv):
super(GCN, self).__init__()
self.conv1 = conv1(num_node_features, hidden_channels[0])
self.conv2 = conv2(hidden_channels[0], 2)
self.use_skip = use_skip
if self.use_skip:
self.weight = nn.init.xavier_normal_(Parameter(torch.Tensor(num_node_features, 2)))
def forward(self, data):
x = self.conv1(data.x, data.edge_index)
x = x.relu()
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv2(x, data.edge_index)
if self.use_skip:
x = F.softmax(x + torch.matmul(data.x, self.weight), dim=-1)
else:
x = F.softmax(x, dim=-1)
return x
def embed(self, data):
x = self.conv1(data.x, data.edge_index)
return x
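
# Shape sketch (illustrative, for one timestep graph from the loaders above):
# out = model(data)        # -> [num_nodes, 2] softmax scores (class 0 = illicit, class 1 = licit)
# emb = model.embed(data)  # -> [num_nodes, 100] first-layer embeddings (hidden_channels[0] below)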
# In[ ]:
model = GCN(num_node_features=data.num_node_features, hidden_channels=[100])
model.to(device)
# ## Train
# #### Hyperparameter
# In[ ]:
patience = 50
lr = 0.001
epoches = 1000
# In[ ]:
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
criterion = torch.nn.CrossEntropyLoss(weight=torch.tensor([0.7, 0.3]).to(device))
train_losses = []
val_losses = []
accuracies = []
if1 = []
precisions = []
recalls = []
iterations = []
for epoch in range(epoches):
model.train()
train_loss = 0
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
out = model(data)
loss = criterion(out[data.train_mask], data.y[data.train_mask].long())
_, pred = out[data.train_mask].max(dim=1)
loss.backward()
train_loss += loss.item() * data.num_graphs
optimizer.step()
train_loss /= len(train_loader.dataset)
if (epoch + 1) % patience == 0:
model.eval()
ys, preds = [], []
val_loss = 0
for data in test_loader:
data = data.to(device)
out = model(data)
loss = criterion(out[data.test_mask], data.y[data.test_mask].long())
val_loss += loss.item() * data.num_graphs
_, pred = out[data.test_mask].max(dim=1)
ys.append(data.y[data.test_mask].cpu())
preds.append(pred.cpu())
y, pred = torch.cat(ys, dim=0).numpy(), torch.cat(preds, dim=0).numpy()
val_loss /= len(test_loader.dataset)
f1 = f1_score(y, pred, average=None)
mf1 = f1_score(y, pred, average='micro')
precision = precision_score(y, pred, average=None)
recall = recall_score(y, pred, average=None)
iterations.append(epoch + 1)
train_losses.append(train_loss)
val_losses.append(val_loss)
if1.append(f1[0])
accuracies.append(mf1)
precisions.append(precision[0])
recalls.append(recall[0])
print(
'Epoch: {:02d}, Train_Loss: {:.4f}, Val_Loss: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, Illicit f1: {:.4f}, F1: {:.4f}'.format(
epoch + 1, train_loss, val_loss, precision[0], recall[0], f1[0], mf1))
# In[ ]:
a, b, c, d = train_losses, val_losses, if1, accuracies
import pickle
g = [a, b, c, d]
pickle.dump(g, open('res_' + f'{epoches}', 'wb'))
with open('res_' + f'{epoches}', "rb") as f:
g = pickle.load(f)
a, b, c, d = g
ep = [i for i in range(patience, epoches + 1, patience)]
plt.figure()
plt.plot(np.array(ep), np.array(a), 'r', label='Train loss')
plt.plot(np.array(ep), np.array(b), 'g', label='Valid loss')
plt.plot(np.array(ep), np.array(c), 'black', label='Illicit F1')
plt.plot(np.array(ep), np.array(d), 'orange', label='F1')
plt.legend(['Train loss', 'Valid loss', 'Illicit F1', 'F1'])
plt.ylim([0, 1.0])
plt.xlim([patience, epoches])
plt.savefig("filename.png")
plt.show()
# In[ ]:
| UTF-8 | Python | false | false | 10,560 | py | 9 | Elliptic_dataset_GCN.py | 7 | 0.615814 | 0.601136 | 0 | 316 | 32.417722 | 234 |
Raghuvar/edyst_assignment | 13,194,139,544,037 | c0fc8057262a32e11c253c7a64af0e20c13d3c13 | 5d8a4445055ad5d88e13b73120d4e31a164a07dc | /flask_virenv/lib/python3.6/tempfile.py | ef7444d272e7475e2b99b29817aa7a3f1773bdb1 | [] | no_license | https://github.com/Raghuvar/edyst_assignment | 9bc452366682b8f5ba795abf0adf5428f2fc01f5 | 8b5dc6f54883192d04a8da01bf8f0b1aba9dcd76 | refs/heads/master | 2020-04-05T16:31:12.823164 | 2018-11-10T20:02:16 | 2018-11-10T20:02:16 | 157,016,395 | 0 | 0 | null | false | 2020-01-09T05:19:56 | 2018-11-10T19:56:59 | 2018-11-10T20:02:35 | 2020-01-09T05:19:54 | 9,471 | 0 | 0 | 1 | Python | false | false | /home/raghuvar/anaconda3/lib/python3.6/tempfile.py | UTF-8 | Python | false | false | 50 | py | 49 | tempfile.py | 43 | 0.84 | 0.78 | 0 | 1 | 50 | 50 |
aholbrook03/pyopengl-example | 2,869,038,156,136 | b935291bd8bb210e56868a7b8965f46c3d050ca7 | 32d9a186a8054375ff0a20ba7faae132e3a3b15d | /etgg2801/robot.py | 505f2f68621dc0c1caf07a86050415402cb81319 | [] | no_license | https://github.com/aholbrook03/pyopengl-example | 2fbf7b74154cad20641f4073a32278639c70a27f | ff73e3e97d58a074fca35a1847d3c4a191df624d | refs/heads/master | 2021-08-22T07:18:33.720778 | 2017-11-29T15:46:35 | 2017-11-29T15:46:35 | 112,494,372 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # FILENAME: robot.py
# BY: Andrew Holbrook
# DATE: 9/24/2015
from OpenGL import GL
from . import GLWindow, Vector4, Matrix4
class Joint(object):
"""Base class for all joint types (prismatic, revolute, etc).
"""
def __init__(self, partA, partB, axis=(0,1,0), offset=(0,0,0)):
"""Creates a joint linking parts A and B. The axis of motion can be
specified, along with the offset between parts A and B.
"""
self.partA = partA
self.partB = partB
self.value = 0.0
self.velocity = 0.0
self.axis = axis
self.offset = offset
self.valueMin = 0.0
self.valueMax = 0.0
self.dfunc = self.increaseValue
self.offsetMatrix = Matrix4.getTranslation(*offset)
def increaseValue(self, dtime):
"""Increase the value (angle or distance) of the joint with respect to
the elapsed time (dtime)--the maximum joint value is observed.
"""
self.value = min(self.valueMax, self.value + self.velocity * dtime)
if self.value == self.valueMax:
self.dfunc = self.decreaseValue
def decreaseValue(self, dtime):
"""Decrease the value (angle or distance) of the joint with respect to
the elapsed time (dtime)--the minimum joint value is observed.
"""
self.value = max(self.valueMin, self.value - self.velocity * dtime)
if self.value == self.valueMin:
self.dfunc = self.increaseValue
def setLimits(self, min, max):
"""Sets the minimum/maximum joint limits.
"""
self.valueMin = min
self.valueMax = max
class RevoluteJoint(Joint):
"""Class for representing a rotating joint with one degree of freedom.
"""
def __init__(self, partA, partB, axis=(0,1,0), offset=(0,0,0)):
"""See Joint class.
"""
super().__init__(partA, partB, axis, offset)
def getTransformation(self):
"""Return the transformation matrix representing partB relative to partA.
"""
angleList = [a * self.value for a in self.axis]
return self.offsetMatrix * Matrix4.getRotation(*angleList)
class PrismaticJoint(Joint):
def __init__(self, partA, partB, axis=(0,1,0), offset=(0,0,0)):
"""See Joint class.
"""
super().__init__(partA, partB, axis, offset)
def getTransformation(self):
"""Return the transformation matrix representing partB relative to partA.
"""
dList = [a * self.value for a in self.axis]
return self.offsetMatrix * Matrix4.getTranslation(*dList)
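
# Example joint (hypothetical part names, using only the API defined above):
# elbow = RevoluteJoint('upper_arm', 'forearm', axis=(0, 0, 1), offset=(-0.3, 0.0, 0.0))
# elbow.setLimits(-120, 120)
# elbow.velocity = 375.0 / 1000.0   # degrees per millisecond, matching the Viper below
# pose = elbow.getTransformation()  # 4x4 pose of 'forearm' relative to 'upper_arm'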
class Robot(object):
def __init__(self, model):
self.model = model
self.joints = []
self.position = Vector4()
self.orientation = Vector4()
renderDelegate = GLWindow.getInstance().renderDelegate
self.modelview_loc = renderDelegate.modelview_loc
def addJoint(self, joint):
self.joints.append(joint)
def cleanup(self):
self.model.cleanup()
def update(self, dtime):
for j in self.joints:
j.dfunc(dtime)
def render(self):
rotMatrix_ow = Matrix4.getRotation(*self.orientation.getXYZ())
tranMatrix_ow = Matrix4.getTranslation(*self.position.getXYZ())
# object to world matrix
matrix_ow = tranMatrix_ow * rotMatrix_ow
GL.glUniformMatrix4fv(self.modelview_loc, 1, False, matrix_ow.getCType())
self.model.renderPartByName(self.joints[0].partA)
for j in self.joints:
matrix_ow *= j.getTransformation()
GL.glUniformMatrix4fv(self.modelview_loc, 1, False, matrix_ow.getCType())
self.model.renderPartByName(j.partB)
class Scara(Robot):
def __init__(self, model):
super().__init__(model)
self.addJoint(RevoluteJoint("L0", "L1"))
self.addJoint(RevoluteJoint("L1", "L2", offset=(-0.325,0.0)))
self.addJoint(PrismaticJoint("L2", "d3"))
self.joints[0].velocity = 386.0 / 1000.0
self.joints[1].velocity = 720.0 / 2000.0
self.joints[2].velocity = 1.1 / 1000.0
self.joints[0].setLimits(-105, 105)
self.joints[1].setLimits(-150, 150)
self.joints[2].setLimits(-0.21, 0.21)
class Viper(Robot):
def __init__(self, model):
super().__init__(model)
self.addJoint(RevoluteJoint('L0', 'L1'))
self.addJoint(RevoluteJoint('L1', 'L2', (0, 0, 1), (-0.075, 0.335, 0.0)))
self.addJoint(RevoluteJoint('L2', 'L3', (0, 0, 1), (-0.365, 0, 0)))
self.addJoint(RevoluteJoint('L3', 'L4', (0, 1, 0), (0.09, 0, 0)))
self.addJoint(RevoluteJoint('L4', 'L5', (0, 0, 1), (0, 0.4, 0)))
self.joints[0].velocity = 328.0 / 1000.0
self.joints[1].velocity = 300.0 / 1000.0
self.joints[2].velocity = 375.0 / 1000.0
self.joints[3].velocity = 375.0 / 1000.0
self.joints[4].velocity = 375.0 / 1000.0
self.joints[0].setLimits(-170, 170)
self.joints[1].setLimits(-190, 45)
self.joints[2].setLimits(-29, 256)
self.joints[3].setLimits(-190, 190)
self.joints[4].setLimits(-120, 120)
| UTF-8 | Python | false | false | 5,268 | py | 11 | robot.py | 7 | 0.585801 | 0.540623 | 0 | 145 | 35.275862 | 85 |
abrarShariar/Algorithmic-problem-solving | 11,441,792,924,870 | bab14b3f89a7f90971f353ab5b461c5197ce17b4 | d6d65c502c3fa3e6570355530db61b32527ff1b0 | /python-linkedin/Ex_Files_python_dpatterns/Exercise Files/Ch02/02_06/singleton.py | ad582f0a09a77065fc8ae901acdb024d69fa6b2e | [] | no_license | https://github.com/abrarShariar/Algorithmic-problem-solving | f6b129e85e7120ec46faad9c3293b37db056211f | 5649fa35352a1c468c2a935135f46202280575c0 | refs/heads/master | 2023-04-11T00:54:39.280958 | 2021-12-16T13:50:06 | 2021-12-16T13:50:06 | 49,055,600 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Borg:
"""The Borg design pattern"""
# attribute dictionary
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
class Singleton(Borg):
"""The singleton class"""
| UTF-8 | Python | false | false | 230 | py | 657 | singleton.py | 622 | 0.556522 | 0.556522 | 0 | 13 | 16.153846 | 36 |
muzammilafsar/Trailzz | 8,486,855,383,549 | f433413bc077775ed7d7764545fcfc6c3212a989 | b7b495abf20c37506ff1a86839fb74a2a5cf75b6 | /movie/views.py | aee23e8ee6968278c478635fe6db1140e1aff97d | [] | no_license | https://github.com/muzammilafsar/Trailzz | 6e7eda05627b8cf04ca5c9773ab4cf3a417057b7 | 1435103a22a2d1f7ebbbe01766295be5d5147e21 | refs/heads/master | 2020-07-20T18:56:51.557444 | 2016-09-11T18:06:56 | 2016-09-11T18:06:56 | 67,941,788 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.views import generic
from .models import Movie_Details
# Create your views here.
class Home(generic.ListView):
template_name = 'movie/home.html'
context_object_name = 'object_list'
def get_queryset(self):
        return Movie_Details.objects.all()[::-1]  # newest first; slicing with a step evaluates the queryset to a list
class Detail(generic.DetailView):
model = Movie_Details
template_name = 'movie/detail.html' | UTF-8 | Python | false | false | 379 | py | 22 | views.py | 13 | 0.712401 | 0.709763 | 0 | 13 | 28.230769 | 48 |
ulisesmx/exercises | 2,456,721,304,768 | daed40a235a56a96a1090d28f10f617822e75341 | 994333bed095004c9aac822ec19974f402aa4ea5 | /legacy_code/bit_manipulation/flipping_bits.py | 54b164451c939b5580fd0fc3a02652c01a4faadd | [] | no_license | https://github.com/ulisesmx/exercises | 84c37526ceede9399a43b5cd9baa9c3e7b7643bc | fb2430e508ab7f9da79c3dae5ed98413c28a2536 | refs/heads/master | 2020-04-03T22:05:51.345706 | 2019-01-22T06:49:33 | 2019-01-22T06:49:33 | 56,024,774 | 1 | 0 | null | false | 2016-05-05T16:18:30 | 2016-04-12T02:37:58 | 2016-04-12T02:43:24 | 2016-05-05T16:18:30 | 19 | 0 | 0 | 0 | Python | null | null | #!/bin/python
t = int(raw_input())
sum_val = 2**32
for _ in xrange(t):
    print (~int(raw_input()) + sum_val)  # ~n + 2**32 == (2**32 - 1) - n, i.e. n with all 32 bits flipped
| UTF-8 | Python | false | false | 113 | py | 486 | flipping_bits.py | 486 | 0.566372 | 0.539823 | 0 | 7 | 15.142857 | 39 |
Bekbolot888/Hoome_Worke | 17,214,228,947,997 | aaaf8af8321efb0a02b516fdd2ab344dd6d597da | d1813b0217264057c867c23e70a6262586bbe2f3 | /Warmup-1/monkey_trouble.py | 7a088b96367d71903b657b650f7fa5ed9e0f2f26 | [] | no_license | https://github.com/Bekbolot888/Hoome_Worke | 7642dc769522cc9d5610e145f12b3144546a11c5 | 168f206101dc309f4157b89d4eb4751fe86951b3 | refs/heads/master | 2022-12-19T08:21:19.744052 | 2020-09-27T13:28:32 | 2020-09-27T13:28:32 | 297,949,833 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a_monkey = bool (False)
b_monkey = bool (True)
print(a_monkey, b_monkey)
if (a_monkey == True) and (b_monkey == True) or (a_monkey == False) and (b_monkey == False):
print("we are in trouble")
else:
print("we are not in trouble") | UTF-8 | Python | false | false | 243 | py | 17 | monkey_trouble.py | 17 | 0.621399 | 0.621399 | 0 | 7 | 33 | 92 |
Gordey007/parserSuper | 17,403,207,506,313 | 40377614df657c059a17dddd81f3e84110108fc2 | aa247390a6500e4fcdb3686d8d27008cbcd3cdd5 | /ParserSuperJob.py | a93d18ba35cbd5d4fb77501e84d93a56273945ec | [] | no_license | https://github.com/Gordey007/parserSuper | fa42aa4cdf282c1fa1ad0450fca1fd265b9ce348 | 4a20be401055196634962e8524da2b5d9b681672 | refs/heads/master | 2023-06-18T21:34:08.139164 | 2021-07-10T14:52:52 | 2021-07-10T14:52:52 | 305,690,272 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # use python ParserSuperJob.py
from urllib.request import urlopen
from urllib.parse import urljoin
from urllib.parse import quote
from lxml.html import fromstring
import xlsxwriter
ITEM_PATH = '._2CsQi ._2g1F- ._34bJi'
ITEM_PATH2 = '._2CsQi ._2g1F- .YYC5F'
PAGE = '._1BOkc'
def parser_vacancies():
f = urlopen(url)
list_html = f.read().decode('utf-8')
list_doc = fromstring(list_html)
dates = []
for elem in list_doc.cssselect(ITEM_PATH):
span = elem.cssselect('span')[0]
dates.append(span.text)
urls = []
for elem in list_doc.cssselect(ITEM_PATH2):
a = elem.cssselect('a')[0]
urls.append(urljoin(url2, a.get('href')))
vacancies = []
i = 0
for item in dates:
vacancy = {'date': item, 'url': urls[i]}
vacancies.append(vacancy)
i += 1
return vacancies
def export_excel(filename, vacancies):
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': True})
    field_names = ('Date', 'URL')
for i, field in enumerate(field_names):
worksheet.write(0, i, field, bold)
fields = ('date', 'url')
for row, vacancy in enumerate(vacancies, start=1):
for col, field in enumerate(fields):
worksheet.write(row, col, vacancy[field])
workbook.close()
print('Enter what to search for')
search = input('> ')
print('Enter the number of a city or country\nKomsomolsk-on-Amur - 0\nKhabarovsk - 1\nrussia - 2')
sity_array = ['komsomolsk-na-amure', 'habarovsk', 'russia']
sity = sity_array[int(input('> '))]
url = 'https://' + sity + '.superjob.ru/resume/search_resume.html?keywords%5B0%5D%5Bkeys%5D=' + quote(search)\
+ '&keywords%5B0%5D%5Bskwc%5D=and&keywords%5B0%5D%5Bsrws%5D=7&sbmit=1'
url2 = 'https://' + sity + '.superjob.ru'
f = urlopen(url)
list_html = f.read().decode('utf-8')
list_doc = fromstring(list_html)
export_excel('Vacancies ' + search + ' ' + sity + '.xlsx', parser_vacancies())
| UTF-8 | Python | false | false | 2,083 | py | 1 | ParserSuperJob.py | 1 | 0.637681 | 0.616692 | 0 | 71 | 27.183099 | 110 |
nameusea/pyGreat | 7,696,581,412,514 | c8907b1c04991a72ad375be9859448da8f1155b6 | dd3f5a712dbab0d3c4f4526c64c08ba710f78b81 | /use/ReinforceLearning/gymTest/tframe.py | d69e67c3f600c08c9e50517f4b6d817027adf9e6 | [] | no_license | https://github.com/nameusea/pyGreat | 3988ebcce3f80a7e458a20f9b2e3ccba368efcf8 | dde8b6a1348620ffd3b2d65db3d5b4331e5c78be | refs/heads/master | 2023-04-25T09:02:32.831423 | 2021-05-17T11:31:22 | 2021-05-17T11:31:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # gym环境类子类框架解读
import gym
class TestEnv(gym.Env):
    # Metadata: settings that support visualization, e.g. parameters used when rendering; omit if you don't need to change the defaults
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 2
}
    # __init__(): initializes the action space, the observation (state) space and other quantities the environment needs, so an RL algorithm can search the given state space for suitable actions
def __init__(self):
self.action_space = None
self.observation_space = None
pass
    # step(): implements the logic of the agent's interaction with the environment.
    # It takes an action as input and,
    # based on that action, returns the next state, the reward for the current action, whether the episode is done, and debugging info.
def step(self, action):
reward = None
done = False
info = {}
return self.state, reward, done, info
    # reset(): resets the agent's state before each episode starts
def reset(self):
return self.observation_space.sample()
    # render(): draws the visualization
def render(self, mode='human'):
return None
    # close(): cleans up the canvas when the program ends
def close(self):
return None
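
# A minimal interaction-loop sketch (assumes action_space/observation_space above are
# replaced with real gym.spaces objects; as written they are None and sample() would fail):
# env = TestEnv()
# state = env.reset()
# for _ in range(10):
#     state, reward, done, info = env.step(env.action_space.sample())
#     env.render()
#     if done:
#         state = env.reset()
# env.close()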
if __name__ == '__main__':
pass | UTF-8 | Python | false | false | 1,344 | py | 414 | tframe.py | 317 | 0.603448 | 0.602371 | 0 | 39 | 22.820513 | 69 |
yimengliu0216/OctConv_DCGAN | 17,231,408,827,575 | 1362394eafcd587c88fff0aacf23b1d31a5b6dc1 | ae9db1e04077a24a2ca0f1ad7163d76cc2dd1419 | /plot_time.py | f54e4d14e62bb08a529103927f42420fb8bacaa3 | [] | no_license | https://github.com/yimengliu0216/OctConv_DCGAN | 6ab47ec98331b60f7ed716c46066cdc8cb02566d | 15da491b298db16c34af27dd110dacadebfb6117 | refs/heads/master | 2020-06-11T13:33:42.265583 | 2019-06-26T21:58:29 | 2019-06-26T21:58:29 | 193,982,994 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
fig_time, ax_time = plt.subplots()
num_epoch = 200
dcgan_time = []
dcgan_g_time = []
dcgan_d_time = []
dcgan_gd_time = []
dcgan_time_avg = 0.0
dcgan_g_time_avg = 0.0
dcgan_d_time_avg = 0.0
dcgan_gd_time_avg = 0.0
with open('time/dcgan_time.txt', 'r') as f:
dcgan_time_str = f.read().split('\n')
for i in range(num_epoch):
dcgan_time.append(float(dcgan_time_str[i]))
dcgan_time_avg += float(dcgan_time_str[i])
dcgan_time_avg /= num_epoch
with open('time/dcgan_time_g.txt', 'r') as f:
dcgan_g_time_str = f.read().split('\n')
for i in range(num_epoch):
dcgan_g_time.append(float(dcgan_g_time_str[i]))
dcgan_g_time_avg += float(dcgan_g_time_str[i])
dcgan_g_time_avg /= num_epoch
with open('time/dcgan_time_d.txt', 'r') as f:
dcgan_d_time_str = f.read().split('\n')
for i in range(num_epoch):
dcgan_d_time.append(float(dcgan_d_time_str[i]))
dcgan_d_time_avg += float(dcgan_d_time_str[i])
dcgan_d_time_avg /= num_epoch
with open('time/dcgan_time_gd.txt', 'r') as f:
dcgan_gd_time_str = f.read().split('\n')
for i in range(num_epoch):
dcgan_gd_time.append(float(dcgan_gd_time_str[i]))
dcgan_gd_time_avg += float(dcgan_gd_time_str[i])
dcgan_gd_time_avg /= num_epoch
ax_time.plot(dcgan_time, label="DCGAN")
ax_time.plot(dcgan_g_time, label="DCGAN_G")
ax_time.plot(dcgan_d_time, label="DCGAN_D")
ax_time.plot(dcgan_gd_time, label="DCGAN_GD")
ax_time.set(xlabel='Epoch', ylabel='Time (s)', title='Training Time')
ax_time.legend()
fig_time.savefig("time/train_time.png")
dcgans_time_avg = [dcgan_time_avg, dcgan_g_time_avg, dcgan_d_time_avg, dcgan_gd_time_avg]
with open('time/dcgans_time_avg.txt', 'a') as f:
f.write(str(dcgans_time_avg))
| UTF-8 | Python | false | false | 1,853 | py | 1 | plot_time.py | 1 | 0.626552 | 0.619536 | 0 | 65 | 27.507692 | 89 |
macbre/wbc.macbre.net | 7,095,286,003,588 | 0da77df582a02a4942169d686d50cd3dd3165d77 | 50c4b11cf0482c404505d1134a3d3f6744da1b6e | /app/wbc/models/__init__.py | 56812e2bc91369854cefcff467368a82ed0d5dfe | [
"MIT"
] | permissive | https://github.com/macbre/wbc.macbre.net | 1c647e18707340850f36c0c64a13bfc04103c6ec | 68ccf6cd437f0dacf3cea6f0c02175efbd992237 | refs/heads/master | 2023-04-07T21:42:44.527275 | 2022-10-19T14:52:36 | 2022-10-19T14:52:36 | 63,534,115 | 0 | 0 | MIT | false | 2023-09-08T02:16:21 | 2016-07-17T13:48:25 | 2021-12-06T11:49:56 | 2023-09-08T02:16:18 | 505 | 0 | 0 | 6 | Python | false | false | from .model import Model
from .documents import DocumentModel
from .issues import IssuesModel
| UTF-8 | Python | false | false | 94 | py | 48 | __init__.py | 31 | 0.840426 | 0.840426 | 0 | 3 | 30.333333 | 36 |
WanliXue/BF_implemation | 2,525,440,806,598 | 8682d05fdb4e92b4f069b8195e0d154dbf657278 | a98c455a318ab2d47b10ef1aa195b7dfd1b5449c | /venv/lib/python2.7/site-packages/cvxpy/constraints/attributes.py | 1def7c027982859b02aa1b7b0a9a1888a19f24c5 | [] | no_license | https://github.com/WanliXue/BF_implemation | ddd463ed906e1f4ee0de492da48bc6de3574bfd0 | 211aa963f3be755858daf03fca5690d3c9532053 | refs/heads/main | 2022-12-26T07:04:05.280651 | 2020-10-13T02:08:55 | 2020-10-13T02:08:55 | 303,561,823 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Copyright 2017 Robin Verschueren
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import inspect
import sys
from cvxpy.constraints import Zero, NonPos, SOC, ExpCone, PSD
def attributes():
"""Return all attributes, i.e. all functions in this module except this function"""
this_module_name = __name__
return [obj for name, obj in inspect.getmembers(sys.modules[this_module_name])
if (inspect.isfunction(obj) and
name != 'attributes')]
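
# Illustration: attributes() returns the predicate functions defined below, e.g.
# [f.__name__ for f in attributes()]
# -> ['are_arguments_affine', 'is_cone_constraint', 'is_ecos_constraint', 'is_qp_constraint']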
def is_qp_constraint(constraint):
if type(constraint) in {Zero, NonPos}:
return True
return False
def is_cone_constraint(constraint):
if type(constraint) in {Zero, NonPos, SOC, ExpCone, PSD}:
return True
return False
def is_ecos_constraint(constraint):
if type(constraint) in {Zero, NonPos, SOC, ExpCone}:
return True
return False
def are_arguments_affine(constraint):
return all(arg.is_affine for arg in constraint.args)
| UTF-8 | Python | false | false | 1,535 | py | 108 | attributes.py | 94 | 0.72443 | 0.721173 | 0 | 52 | 28.519231 | 87 |
claumariut/pythonCrashCourse_exercises | 10,342,281,293,337 | 3ba18050b2256f5afe052d5e0ba94cab3a6d98e3 | ed583e633544e0113413c606f6bc7ca5ad734c45 | /Chapter 4. Working with Lists/foods.py | cc0a5ec38f0d1a5f6953ff6e8946aeaddcea6841 | [] | no_license | https://github.com/claumariut/pythonCrashCourse_exercises | 8b13b82ce9bda81d8435ad12a67b690fee366cba | 1574c4a360591ff19c0da54cfcbad706a296625b | refs/heads/master | 2022-12-22T03:10:52.134345 | 2020-10-01T20:52:27 | 2020-10-01T20:52:27 | 294,251,797 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | my_foods = ['pizza', 'falafel', 'carrot cake']
friend_foods = my_foods[:] # To copy an entire list we have to use slicing;
                           # otherwise the assignment would make both
                           # variables point to the same list.
print('My favorite foods are:')
for food in my_foods:
print(food)
print("\n My friend's favorite foods are:")
for food in friend_foods:
print(food)
my_foods.append('cannoli')
friend_foods.append('ice cream')
print('My favorite foods are:')
print(my_foods)
print("\n My friend's favorite foods are:")
print(friend_foods)
| UTF-8 | Python | false | false | 531 | py | 94 | foods.py | 91 | 0.677966 | 0.677966 | 0 | 17 | 29.117647 | 76 |
yjyszzr/spider | 1,640,677,539,254 | 0644c7f399abfdc51a66f2daecc61e422f3426e4 | 176c85e338cabd9a02ad21c11eff321609e3efce | /t_spider/dl_asia/dl_asia.py | 8fd7beaabf60597a774f9b04b9ec3c198f23474c | [] | no_license | https://github.com/yjyszzr/spider | 3ebb9d92d87b17cc353b018c1630e5c4541b5f6d | 71d1a94d0f76884974fe5f153f55aef9fbb00bf2 | refs/heads/master | 2020-09-15T18:41:06.910701 | 2019-09-24T10:00:11 | 2019-09-24T10:00:11 | 223,529,689 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import os
while True:
try:
print("爬虫马上启动....")
os.system('scrapy crawl real_ya --nolog')
print("爬虫已完毕,休眠5秒钟")
time.sleep(5)
except:
print("异常错误稍后五秒重试!")
time.sleep(5) | UTF-8 | Python | false | false | 244 | py | 117 | dl_asia.py | 111 | 0.654639 | 0.639175 | 0 | 12 | 15.25 | 43 |
Aleum/MiniGo | 16,458,314,709,672 | 56a909080c4a9e38ebc5fc232963fdbd02313c95 | 3e75b15d582c801a168d3037f813a198da7b6152 | /play_DCL/feature/utility.py | ab85ba6f6a42d62ab3d24432904fe747062d0478 | [] | no_license | https://github.com/Aleum/MiniGo | db270ec485c4202a584a0a6dd6f27ab934614d7d | fe8d780c573e38e787ca935ffa9557a4ec5b32ac | refs/heads/master | 2020-12-25T11:05:35.723433 | 2016-07-27T06:38:32 | 2016-07-27T06:38:32 | 61,085,318 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
utility.py
"""
import os
import os.path
from SGFGame import SGFGame
def filenames_in(path, extensions):
for entryname in os.listdir(path):
entrypath = os.path.abspath(os.path.join(path, entryname))
entry_ext = os.path.splitext(entrypath)[1][1:]
if os.path.isfile(entrypath) and (entry_ext in [ext.replace(".", "") for ext in extensions.split(";")]):
yield entrypath
def print_features(feature_map):
for row_index in range(feature_map.rows):
row = "".join([(value or ".") for value in feature_map.board[row_index]])
row += "\t"
row += "\t".join(["".join(["{0}".format(value or ".") for value in feature[row_index]]) for feature in feature_map.features])
print(row)
def print_board(board):
for row_index in range(board.rows):
print("".join([(value or ".") for value in board[row_index]]))
def print_feature(feature):
for row_index in range(len(feature)):
print("".join(["{0}".format(value or ".") for value in feature[row_index]]))
def print_int_feature(board, feature):
for row_index in range(board.rows):
row = "".join([(value or ".") for value in board[row_index]])
row += "\t"
row += " ".join(["{0:3}".format(value or "...") for value in feature[row_index]])
print(row)
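# Minimal usage sketch (illustrative): list all SGF files under a directory.
#   for path in filenames_in("./records", ".sgf;.SGF"):
#       print(path)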
| UTF-8 | Python | false | false | 1,379 | py | 83 | utility.py | 80 | 0.588832 | 0.583756 | 0 | 38 | 35.289474 | 133 |
kmulrey/particle_bias_test | 1,803,886,273,706 | 8ed2c80a32e08b2b54e76af8bc3c2fd80529f609 | 41a936256682758d3583edecbbc6b1418912986d | /run_bias.py | c7a29f7df3987d2f30868bb1dc81d2cf320f1fce | [] | no_license | https://github.com/kmulrey/particle_bias_test | 77fd3dfcf36223e5c4bbc54995051d506b30ba72 | b8152db3e5b3b2b9fbe99cb965e80b57aa121fb9 | refs/heads/master | 2020-09-22T14:01:46.422078 | 2019-12-01T23:48:28 | 2019-12-01T23:48:28 | 225,231,026 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import check_bias_new19nov as bias
import numpy as np
import trigger
path='/vol/astro3/lofar/sim/pipeline/events/'
'''
for example:
event number: 196034009
zenith=90-63.97
azimuth=307.92
core x=-105.58
core y=-46.23
'''
coreas_path='/vol/astro3/lofar/sim/pipeline/events/196034009/0/coreas/'
zenith=26.0*np.pi/180
azimuth=307.9*np.pi/180
xcore=-105.58
ycore=-46.23
## selection of events with different trigger criteria
#event=196034009
event=93990550
#event=95749948
#event=87892283
#event=272981612
#event=174634099
working_detectors, global_trigger_condition, local_trigger_condition, trigger_type=trigger.find_trigger(event)
# working_detectors = array of 0/1 for each scintillator, based on daily LORA counts
# trigger_type = 'd' for detector (#/20) or 's' for station (#/5)
# global_trigger_condition = #/20 detectors or #/5 stations
# local_trigger_condition = LORA station condition; normally 3/4, except some instances where it was changed to 2/4 if one detector was broken
print '_____________________________'
print trigger_type
print working_detectors
print global_trigger_condition
print local_trigger_condition
print '_____________________________'
ntrials=500
min_percentage_trigger=95.0
(bias_passed, min_chance_of_hit, chance_of_hit_all_sims) = bias.find_bias(coreas_path, zenith, azimuth, xcore, ycore, ntrials, working_detectors,trigger_type,local_trigger_condition=local_trigger_condition,min_percentage_trigger=min_percentage_trigger, trigger_condition_nof_detectors=global_trigger_condition)
print bias_passed
print min_chance_of_hit
print chance_of_hit_all_sims
| UTF-8 | Python | false | false | 1,599 | py | 6 | run_bias.py | 5 | 0.749844 | 0.666041 | 0 | 57 | 27.052632 | 310 |
JanMalte/secondhandshop_server | 4,621,384,832,681 | 23f25268d4b9bd8606da2595557ccb09cea06acb | 178512c82b4b9513e44a8a692eca2fc50cee9b7d | /src/pos/tests.py | dbb2fd490556e7fa8d65cb0348a40c4a99e58fd1 | [
"MIT"
] | permissive | https://github.com/JanMalte/secondhandshop_server | 192d203a8b90673a555bf037bc81b634750ed6a9 | 879b6c5631c62bc8617c4290170e7cbd3ad6b31c | refs/heads/master | 2020-04-05T22:47:05.344271 | 2019-03-04T08:46:41 | 2019-03-04T08:46:41 | 42,958,798 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from django_webtest import WebTest
from events.models import Event
from .models import Cart
class CartAdminTest(WebTest):
def setUp(self):
self.event = G(Event, is_active=True)
self.admin = G(get_user_model(), is_superuser=True, is_staff=True)
def test_article_list(self):
url = reverse("admin:pos_cart_changelist")
G(Cart)
response = self.app.get(url, user=self.admin)
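        # Minimal sanity check (illustrative): the changelist page should load.
        self.assertEqual(response.status_code, 200)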
# FIXME test list display fields
| UTF-8 | Python | false | false | 593 | py | 136 | tests.py | 80 | 0.703204 | 0.703204 | 0 | 20 | 28.65 | 74 |
13g10n/django.handbook | 5,961,414,611,550 | b9b00ea4977e995243c2b9b00c2a89740f78d23d | 370d4f6e8d41a439f0afd99e406f4523b20afedf | /config/views.py | fe5de02b787595a26ccd25618ba6e663e07d63e0 | [] | no_license | https://github.com/13g10n/django.handbook | 15f39e1213d025a72682dbbd8c4c48a201b5bea0 | e6e37704c0c58b17cea978a35ad5d969624ac07d | refs/heads/master | 2021-08-11T08:41:22.402526 | 2017-11-13T12:28:11 | 2017-11-13T12:28:11 | 107,716,768 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import get_user_model
from django.views.generic import TemplateView
class TestView(TemplateView):
template_name = "test.html"
def get_context_data(self, **kwargs):
        context = super(TestView, self).get_context_data()
        return context
| UTF-8 | Python | false | false | 261 | py | 52 | views.py | 50 | 0.724138 | 0.724138 | 0 | 9 | 27.777778 | 58 |
Ma-Jun-a/my_funny_exploration | 6,914,897,361,339 | c480364094f8de99c3627c38b6fba554047055f3 | 0a8dfb6e1b021a132bd1f256a03dfd3c50746a4a | /selenuim_.py/runner_test_.py | 5c4c7e4c3861ba1d917ab62b503c8c7aa872e19b | [] | no_license | https://github.com/Ma-Jun-a/my_funny_exploration | 54c71a9602a0f6cee6fbc2536d1e872ad766a543 | e220e2dd18d843d21c77a2554335fbc011fe6a9d | refs/heads/master | 2023-05-12T14:09:45.912442 | 2021-03-12T03:40:26 | 2021-03-12T03:40:26 | 207,444,731 | 0 | 0 | null | false | 2023-05-01T20:48:14 | 2019-09-10T02:10:17 | 2021-03-12T03:40:46 | 2023-05-01T20:48:13 | 125 | 0 | 0 | 1 | Python | false | false | import unittest
from utils.HTMLTestRunner import HTMLTestRunner
class MytestCase(unittest.TestCase):
def test0(self):
        print('of course')
def test1(self):
print('test1')
suit = unittest.TestSuite()
suit.addTest(unittest.makeSuite(MytestCase))
with open('./reports/login_test', 'wb') as f:
    runner = HTMLTestRunner(stream=f, title='***', description='***')
runner.run(suit) | UTF-8 | Python | false | false | 465 | py | 78 | runner_test_.py | 73 | 0.668874 | 0.660044 | 0 | 18 | 23.277778 | 68 |
FifiTheBulldog/shortcuts-permissions | 2,860,448,237,236 | ac2a01645b933af16759d8b0c7da344314e3d4b7 | c2719506930b056f2c9bafaf9e4672919b02f0d6 | /scan_shortcut.py | a8224f8d2cc811856c18a7032be009c73d26904f | [
"MIT"
] | permissive | https://github.com/FifiTheBulldog/shortcuts-permissions | 7001377350dcefd141265647b105184ea6d5326d | 688e553a7906e78db855b92b75149528500cae51 | refs/heads/main | 2023-05-08T12:16:06.658936 | 2021-06-03T04:29:36 | 2021-06-03T04:29:36 | 343,220,498 | 6 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # scan_shortcut.py
#
# Exports scan_shortcut, a function that returns an array of permissions
# for a shortcut. The function accepts a .shortcut file as a bytes object.
import json
import plistlib
ACTIONS_PATH = "./actions.json"
with open(ACTIONS_PATH) as file:
action_list = json.load(file)
def scan_shortcut(plist):
'''Accepts a bytes object containing a shortcut, and returns a list of the shortcut's required permissions.'''
actions = plistlib.loads(plist)["WFWorkflowActions"]
perms = []
for action in actions:
action_id = action["WFWorkflowActionIdentifier"]
if action_id in action_list:
for perm in action_list[action_id]:
if not perm in perms:
perms.append(perm)
perms.sort()
return perms | UTF-8 | Python | false | false | 804 | py | 14 | scan_shortcut.py | 9 | 0.664179 | 0.664179 | 0 | 29 | 26.758621 | 114 |
dr-dos-ok/Code_Jam_Webscraper | 7,232,724,949,447 | 92e12d7ceb45ad8822386de166e5bc295b7ac848 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_207/683.py | 1ce2365b57e9ac08be18246a000f65b2373998f4 | [] | no_license | https://github.com/dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import numpy as np
inFile = open('in.txt', 'r')
outFile = open('out.txt', 'w')
t = int(inFile.readline())
for test in range(1, t+1):
N, R, O, Y, G, B, V = map(int, inFile.readline().split(' '))
ans = ['']*N
available = {'R': R,
'Y': Y,
'B': B,
'G': G,
'O': O,
'V': V}
keys = dict((v, k) for k, v in available.iteritems())
fill = max(available.values())
toUse = keys[fill]
# possible = True
remaining = ['R', 'Y', 'B']
if(fill > N/2):
outFile.write("Case #{}: IMPOSSIBLE\n".format(test))
# print 'IMPOSSIBLE'
continue
positions = [2*x for x in range(fill)]
last = positions[-1] + 2
# print toUse
# print positions
for x in positions:
ans[x] = toUse
# print ans
remaining.remove(toUse)
toUse = remaining[0]
fill = available[toUse]
nextPos = last
positions = []
for i in range(fill):
if(nextPos > N-1):
nextPos = 1
positions.append(nextPos)
nextPos += 2
# print toUse
# print positions
for x in positions:
ans[x] = toUse
# print ans
toUse = remaining[-1]
# prev = -5
for i in range(len(ans)):
# cur = 0
if(not ans[i]):
# cur = i
ans[i] = toUse
outFile.write("Case #{}: {}\n".format(test, ''.join(ans)))
| UTF-8 | Python | false | false | 1,441 | py | 60,747 | 683.py | 60,742 | 0.489244 | 0.480222 | 0 | 55 | 25.2 | 64 |
Imrager/Family-Guy-Episode-Finder | 14,027,363,205,332 | e7f8da8a097e9b18b3e22f4af019b01465b92077 | 2519cf1e81b59deaaf17c6f830f39a8afa279ed1 | /tunr_app/serializers.py | ab651f2daadc2fde1eee71e7a584f70b06d855d0 | [] | no_license | https://github.com/Imrager/Family-Guy-Episode-Finder | 9471c6806141eb5183a47dc49958f6d2310008dc | 8f9d9451e5c413f6d3544e9b9f9d1971a8abe248 | refs/heads/master | 2023-01-10T13:22:20.268413 | 2019-06-18T17:05:04 | 2019-06-18T17:05:04 | 190,751,296 | 0 | 1 | null | false | 2023-01-03T23:38:07 | 2019-06-07T13:50:59 | 2019-06-18T17:05:13 | 2023-01-03T23:38:07 | 1,765 | 0 | 1 | 29 | JavaScript | false | false | from rest_framework import serializers
from .models import User, Review, Comment
# class SongSerializer(serializers.ModelSerializer):
# class Meta:
# model = Song
# fields = ('id', 'title', 'album', 'preview_url', 'artist')
# class ArtistSerializer(serializers.ModelSerializer):
# songs = SongSerializer(many=True, read_only=True)
# class Meta:
# model = Artist
# fields = ('id', 'name', 'photo_url', 'nationality', 'songs')
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ('id', 'reply', 'review')
class ReviewSerializer(serializers.ModelSerializer):
comments = CommentSerializer(many=True, read_only=True)
class Meta:
model = Review
fields = ('id', 'review', 'user', 'comments', "episode")
class UserSerializer(serializers.ModelSerializer):
reviews = ReviewSerializer(many=True, read_only=True)
class Meta:
model = User
fields = ('id', 'name', 'password', 'image', 'reviews')
| UTF-8 | Python | false | false | 1,041 | py | 20 | serializers.py | 13 | 0.650336 | 0.650336 | 0 | 32 | 31.5 | 70 |
DanielBetancourt1/Reverse-Engineering-applied-to-a-MicroScribe-device-and-development-of-a-GUI | 8,349,416,466,008 | 534ebc04f468b1479ccf17f39ed42f4bec963500 | da3e06bc7862d301eec29ab9b928be96c0a5bfda | /Python project/Protocolo.py | a9471c250bc3c99a7f9450b085b495abb43b648a | [] | no_license | https://github.com/DanielBetancourt1/Reverse-Engineering-applied-to-a-MicroScribe-device-and-development-of-a-GUI | 89bd4367f67e15560041af6f6f705b1fbc7671dc | ba0176c6b0046b054d0b59cea0d4e1efa36bc4bb | refs/heads/master | 2020-05-29T19:25:02.652832 | 2019-05-30T21:02:36 | 2019-05-30T21:02:36 | 189,329,590 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# - This script starts communication with the MicroScribe (MS) by sending the
#   special commands defined for the device; some commands return relevant
#   information about the MS, such as physical parameters.
import serial, time, sys
import Maxes as Mx
import PhysicalParameters as PhP
# Link between Host and MicroScribe (writing commands and reading data).
def protocolo(TxRx):
    try:
        TxRx.flushInput()  # flush input buffer, discarding all its contents
        TxRx.flushOutput()  # flush output buffer, aborting current output
        TxRx.write(b'BEGIN')  # - Start linking
        D_ID = Comm(TxRx)
        D_ID = D_ID.decode("utf-8")
        # print("Device ID: ", D_ID, '\n') # MSCR
        TxRx.write(bytes.fromhex('C8'))  # Get product name
        Pnm = str(Comm(TxRx))[6:-1]
        # print("Product Name: ", Pnm, '\n') # ÈMicroScribe3D.
        TxRx.write(bytes.fromhex('C9'))  # Get Product ID
        P_ID2 = Comm(TxRx)
        # print("Product ID2: ", P_ID2, '\n') # ÉMSCR.
        TxRx.write(bytes.fromhex('CA'))  # Get Model Name
        MN = str(Comm(TxRx))[6:-1]
        # print("Model Name: ", MN, '\n') # ÊDX.
        TxRx.write(bytes.fromhex('CB'))  # Get Serial Number
        SN = str(Comm(TxRx))[6:-1]
        # print("Serial Number: ", SN, '\n') # Ë40937.
        TxRx.write(bytes.fromhex('CC'))  # Get Comment string
        CS = str(Comm(TxRx))[6:-1]
        # print("Comments: ", CS, '\n') # ÌStandard+Beta.
        TxRx.write(bytes.fromhex('CD'))  # Get parameter format
        PF = str(Comm(TxRx))[6:-1]
        # print("Parameter format: Denavit-Hartenberg form 0.5: ", PF, '\n') # ÍFormat DH0.5.
        TxRx.write(bytes.fromhex('CE'))  # Get version
        FV = str(Comm(TxRx))[6:-1]
        # print("Firmware Version: ", FV, '\n') # ÎHCI 2.0.
        TxRx.write(bytes.fromhex('C6'))  # Get pulses/rev values for each encoder.
        ME = Comm(TxRx)
        print("Pulses per revolution of the encoders: ")
        Encoderfactor = Mx.PulseRev(ME)  # Pulses/Rev
        print('#------------------------------#')
        TxRx.write(bytes.fromhex('C0'))  # Request extra parameters needed to compute
        # all positions and orientations (needed because the comment string is Standard+Beta)
        EP = Comm(TxRx)
        print("Physical Parameters: \n")
        TxRx.write(bytes.fromhex('D3'))  # Get Extra Extended Physical Parameters.
        EEP = Comm(TxRx)
        print("Extended Physical Parameters: ", '\n')
        [cA, sA, A, D, cB, sB] = PhP.PhParameters(EP, EEP)
        # TxRx.write(bytes.fromhex('D1')) # set home ref
        # EB = Comm(TxRx)
        # print("Encoder bits: ", EB)
        # REPORT_MOTION 0xCF
        # SET_HOME_REF 0xD0
        # RESTORE_FACTORY 0xD1
        # INSERT_MARKER 0xD2
        # GET_EXT_PARAMS 0xD3
        return Encoderfactor, cA, sA, A, D, cB, sB, D_ID, Pnm, MN, SN, CS, PF, FV
    except KeyboardInterrupt:
        TxRx.write(b'END')
        TxRx.flushInput()  # flush input buffer, discarding all its contents
        TxRx.flushOutput()  # flush output buffer, aborting current output
        TxRx.flush()
        TxRx.close()
        print('Communication was interrupted manually', '\n')
    except serial.SerialException:  # raised by pyserial when, e.g., the port is not open
        print('You are trying to use a port that is not open', '\n')
# ---------------------------------------------------------- #
# - This short function asks how many bytes are waiting in the buffer and reads them.
def Comm(TxRx):
    TxRx.inWaiting()
    time.sleep(0.10)
    Rm = TxRx.read(TxRx.inWaiting())  # Read response data from MicroScribe.
    return Rm
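# Minimal usage sketch (illustrative; the port name and settings are assumptions):
#   TxRx = serial.Serial('COM3', baudrate=9600, timeout=1)
#   params = protocolo(TxRx)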
| UTF-8 | Python | false | false | 3,701 | py | 18 | Protocolo.py | 12 | 0.568219 | 0.555766 | 0 | 98 | 35.673469 | 105 |
db7777/python-practice | 781,684,050,722 | 2fb9b4630c1aa4068932d652be02b8a0c3d74dda | f8410cc79645b7a7ef67132b92bbb945e7b0c43c | /csvread/cvsread.py | ac2ea34618c268aeb8a4b26db3f5d2eed5d80a91 | [] | no_license | https://github.com/db7777/python-practice | 85aab118b71b80c89f641d8429ecc29b0f6f7d17 | acbbe7cbb05e18b9b566f4763083c7b6f9c73841 | refs/heads/master | 2016-08-07T02:00:17.388509 | 2015-08-30T15:15:20 | 2015-08-30T15:15:20 | 41,615,722 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! python3
__author__ = 'DB'
import csv
def readToList():
    with open('example.csv') as exampleFile:
        exampleReader = csv.reader(exampleFile)
        exampleData = list(exampleReader)  # the easiest way to load the whole file into a list
        print(exampleData)
def readToIterate():
    with open('example.csv') as exampleFile:
        exampleReader = csv.reader(exampleFile)
        for row in exampleReader:
            print('Row #' + str(exampleReader.line_num) + ' ' + str(row))
readToList()
readToIterate() | UTF-8 | Python | false | false | 477 | py | 2 | cvsread.py | 2 | 0.672956 | 0.67086 | 0 | 20 | 22.9 | 73 |
GRSEB9S/linconfig | 5,772,436,066,136 | 1cb33417b2e8ee38ebf7081c1268423d85e0d938 | 79424b68bf129c5a5171494120f7e315a76180b3 | /qgis/qgis2/python/plugins/profiletool/ui/ui_ptdockwidget.py | 5f41812aacee07b58430411362ece027d2765d0c | [] | no_license | https://github.com/GRSEB9S/linconfig | a016937412662db878b0c923e5147f91875604b7 | 84bc9117bb8e6109ef7fd10bf73354d4cf148c36 | refs/heads/master | 2021-09-08T03:17:02.648452 | 2018-03-06T16:32:18 | 2018-03-06T16:32:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# Profile
# Copyright (C) 2012 Patrice Verchere
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, print to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from profiletool import Ui_ProfileTool
from ..tools.plottingtool import *
#from ..profileplugin import ProfilePlugin
try:
from PyQt4.Qwt5 import *
Qwt5_loaded = True
except ImportError:
Qwt5_loaded = False
try:
from matplotlib import *
import matplotlib
matplotlib_loaded = True
except ImportError:
matplotlib_loaded = False
import platform
class Ui_PTDockWidget(QDockWidget,Ui_ProfileTool):
TITLE = "MirrorMap"
def __init__(self, parent, iface1, mdl1):
QDockWidget.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
#self.mainWidget = MirrorMap(self, iface)
self.location = Qt.RightDockWidgetArea
self.iface = iface1
self.setupUi(self)
#self.connect(self, SIGNAL("dockLocationChanged(Qt::DockWidgetArea)"), self.setLocation)
self.mdl = mdl1
#self.showed = False
QObject.connect(self.butSaveAs, SIGNAL("clicked()"), self.saveAs)
def showIt(self):
#self.setLocation( Qt.BottomDockWidgetArea )
self.location = Qt.BottomDockWidgetArea
minsize = self.minimumSize()
maxsize = self.maximumSize()
self.setMinimumSize(minsize)
self.setMaximumSize(maxsize)
self.iface.mapCanvas().setRenderFlag(False)
#TableWiew thing
self.tableView.setModel(self.mdl)
self.tableView.setColumnWidth(0, 20)
self.tableView.setColumnWidth(1, 20)
self.tableView.setColumnWidth(2, 150)
hh = self.tableView.horizontalHeader()
hh.setStretchLastSection(True)
self.tableView.setColumnHidden(4 , True)
self.mdl.setHorizontalHeaderLabels(["","","Layer","Band"])
#self.checkBox.setEnabled(False)
#The ploting area
self.plotWdg = None
#Draw the widget
self.iface.addDockWidget(self.location, self)
self.iface.mapCanvas().setRenderFlag(True)
def addOptionComboboxItems(self):
#self.comboBox.addItem("Temporary polyline")
#self.comboBox.addItem("Selected polyline")
if Qwt5_loaded:
self.comboBox_2.addItem("Qwt5")
if matplotlib_loaded:
self.comboBox_2.addItem("Matplotlib")
def closeEvent(self, event):
self.emit( SIGNAL( "closed(PyQt_PyObject)" ), self )
QObject.disconnect(self.butSaveAs, SIGNAL("clicked()"), self.saveAs)
return QDockWidget.closeEvent(self, event)
def addPlotWidget(self, library):
layout = self.frame_for_plot.layout()
while layout.count():
child = layout.takeAt(0)
child.widget().deleteLater()
if library == "Qwt5":
self.stackedWidget.setCurrentIndex(0)
widget1 = self.stackedWidget.widget(1)
if widget1:
self.stackedWidget.removeWidget( widget1 )
widget1 = None
#self.widget_save_buttons.setVisible( True )
self.plotWdg = PlottingTool().changePlotWidget("Qwt5", self.frame_for_plot)
layout.addWidget(self.plotWdg)
if QT_VERSION < 0X040100:
idx = self.cbxSaveAs.model().index(0, 0)
self.cbxSaveAs.model().setData(idx, QVariant(0), Qt.UserRole - 1)
self.cbxSaveAs.setCurrentIndex(1)
if QT_VERSION < 0X040300:
idx = self.cbxSaveAs.model().index(1, 0)
self.cbxSaveAs.model().setData(idx, QVariant(0), Qt.UserRole - 1)
self.cbxSaveAs.setCurrentIndex(2)
elif library == "Matplotlib":
self.stackedWidget.setCurrentIndex(0)
#self.widget_save_buttons.setVisible( False )
self.plotWdg = PlottingTool().changePlotWidget("Matplotlib", self.frame_for_plot)
layout.addWidget(self.plotWdg)
mpltoolbar = matplotlib.backends.backend_qt4agg.NavigationToolbar2QTAgg(self.plotWdg, self.frame_for_plot)
#layout.addWidget( mpltoolbar )
self.stackedWidget.insertWidget(1, mpltoolbar)
self.stackedWidget.setCurrentIndex(1)
lstActions = mpltoolbar.actions()
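            # drop two of the stock toolbar buttons by position; which buttons
            # these are depends on the Qt4Agg NavigationToolbar layout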
mpltoolbar.removeAction( lstActions[ 7 ] )
mpltoolbar.removeAction( lstActions[ 8 ] )
# generic save as button
def saveAs(self):
idx = self.cbxSaveAs.currentIndex()
if idx == 0:
self.outPDF()
elif idx == 1:
self.outPNG()
elif idx == 2:
self.outSVG()
elif idx == 3:
self.outPrint()
else:
print('plottingtool: invalid index '+str(idx))
def outPrint(self): # Postscript file rendering doesn't work properly yet.
PlottingTool().outPrint(self.iface, self, self.mdl, self.comboBox_2.currentText ())
def outPDF(self):
PlottingTool().outPDF(self.iface, self, self.mdl, self.comboBox_2.currentText ())
def outSVG(self):
PlottingTool().outSVG(self.iface, self, self.mdl, self.comboBox_2.currentText ())
def outPNG(self):
PlottingTool().outPNG(self.iface, self, self.mdl, self.comboBox_2.currentText ())
| UTF-8 | Python | false | false | 6,256 | py | 168 | ui_ptdockwidget.py | 125 | 0.623561 | 0.609015 | 0 | 177 | 34.327684 | 109 |
wfs123456/GCANet | 10,642,929,007,680 | 9ef8a33ca5d2645c4971c5c6ed26c6c6cf7483f6 | 623400be7830f222682dfb3ff4f8b20dbcf93284 | /model.py | 7ee7418f1ebefaa1f6c33ced22f83a29f29c323f | [] | no_license | https://github.com/wfs123456/GCANet | a30b955c01a27d1fbd2965261d1297bbd668c403 | dfc9971a158100916c7a58ac88e1c7cd0e74faa7 | refs/heads/main | 2023-05-13T06:22:17.658150 | 2021-06-06T06:13:00 | 2021-06-06T06:13:00 | 373,827,845 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # _*_ coding: utf-8 _*_
# @author : 王福森
# @time : 2021/4/3 13:46
# @File : model.py
# @Software : PyCharm
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import torch
import os
def make_layers(cfg, in_channels, batch_norm=True):
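    # Builds a VGG-style feature stack from cfg: integers are 3x3 conv output
    # widths (optionally followed by BatchNorm) with ReLU, 'M' is a 2x2 max-pool.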
layers = []
# in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def conv(in_channel, out_channel, kernel_size, dilation=1, bn=True):
#padding = 0
# if kernel_size % 2 == 1:
# padding = int((kernel_size - 1) / 2)
    padding = dilation  # with the 3x3 kernels used here, padding == dilation keeps the spatial size
if bn:
return nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding, dilation=dilation,),
nn.BatchNorm2d(out_channel),
nn.ReLU(inplace=True)
)
else:
return nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding, dilation=dilation,),
# nn.BatchNorm2d(out_channel, momentum=0.005),
nn.ReLU(inplace=True)
)
# class Inception(nn.Module):
# def __init__(self,in_channel):
# super(Inception, self).__init__()
# self.conv1x1 = nn.Sequential(nn.Conv2d(in_channel,64,1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
# self.conv2x1 = nn.Sequential(nn.Conv2d(in_channel,64, 1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
# self.conv3x1 = nn.Sequential(nn.Conv2d(in_channel, 128, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True))
# self.conv4x1 = nn.Sequential(nn.Conv2d(in_channel, 256, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))
#
# self.conv2x2 = nn.Sequential(nn.Conv2d(64,64,3,padding=2,dilation=2), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
# self.conv3x2 = nn.Sequential(nn.Conv2d(128,128,5,padding=4,dilation=2), nn.BatchNorm2d(128), nn.ReLU(inplace=True))
# self.conv4x2 = nn.Sequential(nn.Conv2d(256,256,7,padding=6,dilation=2), nn.BatchNorm2d(256), nn.ReLU(inplace=True))
# self.init_param()
# def forward(self, x):
# x1 = self.conv1x1(x)
# x2 = self.conv2x2(self.conv2x1(x))
# x3 = self.conv3x2(self.conv3x1(x))
# x4 = self.conv4x2(self.conv4x1(x))
# x = torch.cat((x1,x2,x3,x4),1)
# return x
#
# def init_param(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.normal_(m.weight, std=0.01)
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
class AttenModule(nn.Module):
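    # Spatial attention: a 3x3 conv + sigmoid yields a one-channel mask that
    # gates the dilated 3x3 conv features element-wise (see forward()).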
def __init__(self,in_channel,out_channel):
super(AttenModule, self).__init__()
self.attention = nn.Sequential(nn.Conv2d(in_channel, 1, 3,padding=1, bias=True),
nn.Sigmoid()
)
self.conv1 = nn.Sequential(nn.Conv2d(in_channel, out_channel, 3, padding=2, dilation=2, bias=False),
nn.BatchNorm2d(out_channel),
nn.ReLU(inplace=True),
)
self.init_param()
def forward(self, x):
atten = self.attention(x)
features = self.conv1(x)
x = features * atten
return x, atten
def init_param(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class SPPSELayer(nn.Module):
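    # Squeeze-and-excitation over a spatial pyramid: channel statistics are
    # pooled at 1x1, 2x2 and 4x4 (1 + 4 + 16 = 21 values per channel, hence the
    # in_channel*21 input to the FC), producing per-channel weights that
    # rescale a 1x1-conv projection of the input.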
def __init__(self,in_cahnnel, channel, reduction=16):
super(SPPSELayer, self).__init__()
self.avg_pool1 = nn.AdaptiveAvgPool2d(1)
self.avg_pool2 = nn.AdaptiveAvgPool2d(2)
self.avg_pool4 = nn.AdaptiveAvgPool2d(4)
self.fc = nn.Sequential(
nn.Linear(in_cahnnel*21, in_cahnnel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(in_cahnnel // reduction, channel, bias=False),
nn.Sigmoid()
)
self.conv2 = nn.Sequential(nn.Conv2d(in_cahnnel, channel, 1),
nn.BatchNorm2d(channel),
nn.ReLU(inplace=True))
def forward(self, x):
b, c, _, _ = x.size() # b: number; c: channel;
y1 = self.avg_pool1(x).view(b, c) # like resize() in numpy
y2 = self.avg_pool2(x).view(b, 4 * c)
y3 = self.avg_pool4(x).view(b, 16 * c)
y = torch.cat((y1, y2, y3), 1)
y = self.fc(y)
b,out_channel = y.size()
y = y.view(b, out_channel, 1, 1)
x = self.conv2(x)
y = y * x
return y
def init_param(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class Net(nn.Module):
def __init__(self):
super(Net,self).__init__()
# cfg1 = [64, 64, 'M', 128, 128, 'M', 256, 256, 256]
cfg2 = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]
# self.front_end1 = make_layers(cfg1, 3, batch_norm=True)
self.front_end2 = make_layers(cfg2, 3, batch_norm=True)
# self.Inception1 = Inception(512)
self.attenModule1 = AttenModule(512, 256)
self.attenModule2 = AttenModule(256, 128)
self.attenModule3 = AttenModule(128, 64)
self.SPPSEMoudule1 = SPPSELayer(512,256)
self.SPPSEMoudule2 = SPPSELayer(256, 128)
self.SPPSEMoudule3 = SPPSELayer(128, 64)
self.ReduConv1 = conv(512, 256, 3, dilation=1)
self.ReduConv2 = conv(256, 128, 3, dilation=2)
self.ReduConv3 = conv(128, 64, 3, dilation=3)
# self.A_conv = nn.Sequential(conv(512, 128, 3), conv(128, 64, 3))
# self.final = nn.Sequential(nn.Conv2d(64, 1, 1), nn.ReLU())
self.final = nn.Conv2d(64,1,1)
self.init_param()
def init_param(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
print("loading pretrained vgg16_bn!")
if os.path.exists("/home/liuqi/PycharmProjects/SPPSENet/weights/vgg16_bn.pth"):
print("find pretrained weights!")
vgg16_bn = models.vgg16_bn(pretrained=False)
vgg16_weights = torch.load("/home/liuqi/PycharmProjects/SPPSENet/weights/vgg16_bn.pth")
vgg16_bn.load_state_dict(vgg16_weights)
else:
vgg16_bn = models.vgg16_bn(pretrained=True)
# the front conv block's parameter no training
# for p in self.front_end1.parameters():
# p.requires_grad = False
# self.front_end1.load_state_dict(vgg16_bn.features[:23].state_dict())
self.front_end2.load_state_dict(vgg16_bn.features[:33].state_dict())
def forward(self, x, vis=False):
# y = self.front_end1(x)
#dense block
x = self.front_end2(x)
x1,atten1 = self.attenModule1(x)
y1 = self.SPPSEMoudule1(x)
x = torch.cat((x1,y1), 1)
x = self.ReduConv1(x)
x2,atten2 = self.attenModule2(x)
y2 = self.SPPSEMoudule2(x)
x = torch.cat((x2, y2), 1)
x = self.ReduConv2(x)
x3, atten3 = self.attenModule3(x)
y3 = self.SPPSEMoudule3(x)
x = torch.cat((x3, y3), 1)
x = self.ReduConv3(x)
x = self.final(x)
# att = F.interpolate(att, scale_factor=8, mode="nearest", align_corners=None)
# x = F.interpolate(x, scale_factor=8, mode="nearest", align_corners=None)
if vis:
return x, atten1, atten2, atten3
return x
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
net = Net()
# print(net.front_end.state_dict())
x = torch.ones((16, 3, 128, 128))
print(x.size())
    y = net(x)
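    # expected (illustrative): the three 2x2 max-pools downsample 128x128 by 8,
    # so y.size() is torch.Size([16, 1, 16, 16])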
print(y.size()) | UTF-8 | Python | false | false | 8,994 | py | 13 | model.py | 12 | 0.549733 | 0.500445 | 0 | 234 | 37.41453 | 125 |
AndreaCossio/sem2.0 | 10,651,518,923,520 | 2b039a9536f4fbd5197c614e49e35e831729be34 | 8230dba89f8392cb57fd7227d78be3a20157e336 | /SEM 2.0/2.0.2/RADAR 0.2.py | 9461bfa0efdaa27cd684e6e765163cc51e0bbd8b | [] | no_license | https://github.com/AndreaCossio/sem2.0 | febfac72715e9a4834cae2c96adc691881bd7b07 | c256a52b1b7af915d29750638125329bddfc86de | refs/heads/master | 2020-04-13T05:15:49.003281 | 2017-08-04T07:54:54 | 2014-12-02T13:00:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import serial
import time
import pygame, sys
from pygame.locals import *
from pygame.color import THECOLORS
import random
pygame.init()
pygame.display.set_caption('ARDUINO-PROJECT')
screen = pygame.display.set_mode([600, 600])
screen.fill ([255, 255, 255])
class radar():
def __init__(self, x, y, color, radius, width, speed):
self.position = (x, y)
self.width = width
self.color = color
self.image = pygame.draw.circle(screen, color, (x, y), width, width)
self.speed = speed
self.seconds = time.time()
self.state = time.time()
self.radius = width
self.max_radius = radius
def get_enable(self):
self.seconds = time.time()
if int(self.seconds) != int(self.state):
new_radius = int(self.seconds - self.state)*self.speed + self.radius
if new_radius > self.max_radius:
                self.radius = self.width
self.seconds = time.time()
self.state = time.time()
return False
else:
return True
else:
return True
def run(self):
self.seconds = time.time()
if int(self.seconds) != int(self.state):
new_radius = int(self.seconds - self.state)*self.speed + self.radius
self.radius = new_radius
if self.radius > self.max_radius:
self.radius = self.width
self.seconds = time.time()
self.state = time.time()
if self.width >= self.radius:
self.radius = self.width
Surface = pygame.Surface((self.radius * 2, self.radius * 2))
Surface.fill ([255, 255, 255])
Surface.set_colorkey((255, 255, 255))
self.image = pygame.draw.circle(Surface, self.color, [self.radius, self.radius], int(self.radius), self.width)
alpha =((self.max_radius - self.radius)/self.max_radius)**(0.5) * 255
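        # fade: alpha runs from 255 (ring at the center) toward 0 as the ring
        # approaches max_radius, with a square-root easing curve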
Surface.set_alpha(alpha)
screen.blit(Surface, (self.position[0] - self.radius, self.position[1] - self.radius))
class unit():
def __init__(self, x, y, color, radius, width, speed):
self.rect = pygame.draw.rect(screen, [255, 255, 255] ,[x, y , 1, 1], 1)
self.object = radar(x, y, color, radius, width, speed)
self.position = [x, y]
self.enable = False
def run(self):
if self.enable:
self.enable = self.object.get_enable()
if self.enable:
self.object.run()
clock = pygame.time.Clock()
green_radar = radar(300, 300, [0, 204, 0], 300, 2, 0.2)
click = 0
radar_list = []
unit_list = []
while True:
screen.fill([255, 255, 255])
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit ()
elif event.type == MOUSEBUTTONDOWN:
(pos_x, pos_y) = pygame.mouse.get_pos()
object = unit(pos_x, pos_y, [0, 102, 255], 50, 2, 0.05)
unit_list.append(object)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
position_x = random.randint (0, 600)
position_y = random.randint (0, 600)
position_m = (position_x + position_y)/2
width = random.randint(1, 5)
radius = random.randint(5, position_m)
speed = random.randint(1, 99) / 100.0
object = radar(position_x, position_y, THECOLORS[random.choice(THECOLORS.keys())], radius, width, speed)
radar_list.append(object)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_DELETE:
unit_list = []
radar_list = []
green_radar.run()
for unit_object in unit_list:
#print ((unit_object.position[0]-green_radar.position[0])**2 + (unit_object.position[1]-green_radar.position[1])**2) - (int(green_radar.radius)**2)
if abs(((unit_object.position[0]-green_radar.position[0])**2 + (unit_object.position[1]-green_radar.position[1])**2) - (int(green_radar.radius)**2))<= green_radar.radius:
#if unit_object.rect.colliderect(green_radar.image):
unit_object.run()
unit_object.enable = True
if unit_object not in radar_list:
radar_list.append(unit_object)
for radar_object in radar_list:
radar_object.run()
clock.tick(100)
pygame.display.update()
click += 1 | UTF-8 | Python | false | false | 4,400 | py | 24 | RADAR 0.2.py | 19 | 0.575909 | 0.546136 | 0 | 110 | 39.009091 | 178 |
diegord13/programas | 13,013,750,914,227 | 65e2813aae5475b608a85300e75e950db75b9bee | cfd41ae22d3586ac247b1f3fc4e5946a71cb13f4 | /Interfaz_grafica/raiz.py | c1e5e316d0722a515bbf2f95b04c7dc5cb15a44b | [] | no_license | https://github.com/diegord13/programas | c29d7fcfd3d43e45e073d3d7290fb4f7074bfa4b | cf402879047a0b65eddbff302537bd0b700d1067 | refs/heads/master | 2023-08-15T15:12:17.501460 | 2021-10-21T22:49:01 | 2021-10-21T22:49:01 | 407,954,837 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter
raiz = tkinter.Tk()
raiz.title("my program")
raiz.mainloop() | UTF-8 | Python | false | false | 79 | py | 78 | raiz.py | 72 | 0.721519 | 0.721519 | 0 | 7 | 10.428571 | 25 |
ActorExpose/prexploit | 19,396,072,336,238 | f6153e5ebcdd70faaca38f0f5e14b86545a50e9b | c795161c834b18b120e6c906a3618c5841e72b45 | /prexploit/util/report.py | a4537dd04e344bedf8b1bcee4c36c77da4fbd82d | [
"Apache-2.0"
] | permissive | https://github.com/ActorExpose/prexploit | 266151de9557d5e53eedc7c32a1cf077d0dff8c0 | 894a099dc4466526a1d66ae24755675b757b8bcf | refs/heads/main | 2023-02-22T15:43:07.943431 | 2021-01-20T14:44:36 | 2021-01-20T14:44:36 | 331,435,177 | 1 | 1 | Apache-2.0 | true | 2021-01-20T21:19:49 | 2021-01-20T21:19:49 | 2021-01-20T21:19:48 | 2021-01-20T14:44:59 | 280 | 0 | 0 | 0 | null | false | false | import numpy as np
from tabulate import tabulate
class Report(object):
def __init__(self):
self.__report = {}
@classmethod
def from_dict(cls, dic):
report = cls()
report._Report__report = dic
return report
def __add_scores(self, cate, precision, recall, fscore):
if cate not in self.__report:
self.__report[cate] = []
self.__report[cate].append({
'precision': precision,
'recall': recall,
'fscore': fscore,
})
def add_train_scores(self, precision, recall, fscore):
self.__add_scores('train', precision, recall, fscore)
def add_valid_scores(self, precision, recall, fscore):
self.__add_scores('valid', precision, recall, fscore)
def add_test_scores(self, precision, recall, fscore):
self.__add_scores('test', precision, recall, fscore)
def as_dict(self):
return self.__report
def get_average_precision_recall_fscore(self):
def avg_prf(xs):
p = np.mean([x['precision'] for x in xs]).round(2)
r = np.mean([x['recall'] for x in xs]).round(2)
f = np.mean([x['fscore'] for x in xs]).round(2)
return [p, r, f]
return avg_prf(self.__report['train'])\
+ avg_prf(self.__report['valid'])\
            + avg_prf(self.__report['test'])
def table_report(reports, names):
table = [
['', 'Train', 'Train', 'Train',
'Valid', 'Valid', 'Valid',
'Test', 'Test', 'Test'],
['', 'Precision', 'Recall', 'F Score',
'Precision', 'Recall', 'F Score',
'Precision', 'Recall', 'F Score']
]
for report, name in zip(reports, names):
table.append([name] + report.get_average_precision_recall_fscore())
return tabulate(table)
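# Minimal usage sketch (illustrative):
#   r = Report()
#   r.add_train_scores(0.9, 0.8, 0.85)
#   r.add_valid_scores(0.8, 0.7, 0.75)
#   r.add_test_scores(0.7, 0.6, 0.65)
#   print(table_report([r], ['model-a']))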
| UTF-8 | Python | false | false | 1,847 | py | 35 | report.py | 16 | 0.545208 | 0.543584 | 0 | 61 | 29.278689 | 75 |
krishSona/testbackend | 154,618,848,039 | 863858aec2f2de10dc685e019768510c193687f2 | 4b87a0de0f43de2bde41f2590faac970c18fe482 | /core/models.py | 6893eef8672f9b0b2c3b3328b1c8cfee0c95287d | [] | no_license | https://github.com/krishSona/testbackend | d0bc325776537d9814b9022b3538b5e8a840e6a4 | d87e050d02542c58876d4f81c2ea99815ab4160e | refs/heads/master | 2023-04-08T01:26:42.070058 | 2021-04-03T06:08:54 | 2021-04-03T06:08:54 | 354,214,243 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import uuid
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models import Q
from dynamic_validator import ModelFieldRequiredMixin
import utilities
import datetime
import calendar
import math
class Industry(ModelFieldRequiredMixin, models.Model):
name = models.CharField(max_length=50)
REQUIRED_FIELDS = ['name']
def __str__(self):
return str(self.name)
class EmployeeRange(ModelFieldRequiredMixin, models.Model):
number = models.CharField(max_length=10)
REQUIRED_FIELDS = ['number']
def __str__(self):
return str(self.number)
class City(ModelFieldRequiredMixin, models.Model):
name = models.CharField(max_length=50)
REQUIRED_FIELDS = ['name']
def __str__(self):
return str(self.name)
class State(ModelFieldRequiredMixin, models.Model):
name = models.CharField(max_length=50)
REQUIRED_FIELDS = ['name']
def __str__(self):
return str(self.name)
COMPANY_CATEGORY = [
(0, 'contractor'),
(1, 'principal_employer'),
]
class Company(ModelFieldRequiredMixin, models.Model):
rid = models.UUIDField(default=uuid.uuid4, editable=False)
name = models.CharField(max_length=50)
industry = models.ForeignKey(
Industry, on_delete=models.PROTECT, null=True, blank=True)
employee_range = models.ForeignKey(
EmployeeRange, on_delete=models.PROTECT, null=True, blank=True)
office_address = models.TextField(null=True, blank=True)
city = models.ForeignKey(
City, on_delete=models.PROTECT, null=True, blank=True)
state = models.ForeignKey(
State, on_delete=models.PROTECT, null=True, blank=True)
pincode = models.CharField(max_length=6, null=True, blank=True)
gstin = models.CharField(
max_length=15, unique=True, null=True, blank=True)
average_monthly_salary_payout = models.IntegerField(
blank=True, null=True)
monthly_salary_day = models.IntegerField(blank=True, null=True)
category = models.IntegerField(default=0, choices=COMPANY_CATEGORY)
code = models.CharField(max_length=6, null=True, blank=True, unique=True)
domain_name = models.CharField(max_length=250, null=True, blank=True, unique=True)
tie_up = models.BooleanField(default=False)
subscription_amount = models.FloatField(default=0)
REQUIRED_FIELDS = ['name']
def __str__(self):
return str(self.name)
class QrCode(ModelFieldRequiredMixin, models.Model):
qr_id = models.CharField(max_length=23, unique=True)
longitude = models.DecimalField(max_digits=10, decimal_places=7, null=True, blank=True)
latitude = models.DecimalField(max_digits=10, decimal_places=7, null=True, blank=True)
company = models.ForeignKey(Company, on_delete=models.PROTECT)
REQUIRED_FIELDS = ['qr_id', 'company']
def __str__(self):
return str(self.qr_id)
class Department(ModelFieldRequiredMixin, models.Model):
name = models.CharField(max_length=50)
REQUIRED_FIELDS = ['name']
def __str__(self):
return str(self.name)
class Designation(ModelFieldRequiredMixin, models.Model):
name = models.CharField(max_length=50)
REQUIRED_FIELDS = ['name']
def __str__(self):
return str(self.name)
class Bank(ModelFieldRequiredMixin, models.Model):
name = models.CharField(max_length=100)
REQUIRED_FIELDS = ['name']
def __str__(self):
return str(self.name)
class Ifs(ModelFieldRequiredMixin, models.Model):
code = models.CharField(max_length=11)
bank = models.ForeignKey(Bank, on_delete=models.PROTECT)
REQUIRED_FIELDS = ['code', 'bank']
def __str__(self):
return str(self.code)
class Level(ModelFieldRequiredMixin, models.Model):
title = models.CharField(max_length=50)
REQUIRED_FIELDS = ['title']
def __str__(self):
return str(self.title)
class Employer(ModelFieldRequiredMixin, models.Model):
rid = models.UUIDField(default=uuid.uuid4, editable=False)
name = models.CharField(max_length=50, null=True, blank=True)
phone = models.CharField(max_length=10, null=True, blank=True)
email = models.CharField(max_length=255)
user = models.ForeignKey('authentication.User', on_delete=models.PROTECT)
company = models.ForeignKey(
Company, on_delete=models.PROTECT, blank=True, null=True)
department = models.ForeignKey(
Department, on_delete=models.PROTECT, null=True, blank=True)
designation = models.ForeignKey(
Designation, on_delete=models.PROTECT, null=True, blank=True)
principal_companies = models.ManyToManyField(Company, related_name='employers', blank=True)
REQUIRED_FIELDS = ['email', 'name', 'phone', 'company']
def __str__(self):
return str(self.email)
def get_default_work_days():
return {"days": ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]}
class Employee(models.Model):
rid = models.UUIDField(default=uuid.uuid4, editable=False)
name = models.CharField(max_length=100, null=True, blank=True)
phone = models.CharField(max_length=10, null=True, blank=True)
email = models.EmailField(
verbose_name='email address',
max_length=255,
null=True,
blank=True
)
mail_verified = models.BooleanField(null=True, blank=True)
company = models.ForeignKey(
Company, on_delete=models.PROTECT, blank=True, null=True)
company_verified = models.BooleanField(null=True, blank=True)
net_monthly_salary = models.IntegerField()
verified_salary = models.IntegerField(null=True, blank=True)
due_day = models.IntegerField(null=True, blank=True)
kyc = models.BooleanField(null=True, blank=True)
e_nach = models.BooleanField(null=True, blank=True)
is_verified = models.BooleanField(default=False)
employee_id = models.CharField(max_length=50, null=True, blank=True)
extra_data = JSONField(null=True, blank=True)
joining_date = models.DateField(null=True, blank=True)
salary_day = models.IntegerField(null=True, blank=True)
created_at = models.DateField(auto_now_add=True, null=True, blank=True)
permanent_address = models.TextField(null=True, blank=True)
permanent_city = models.CharField(max_length=50, blank=True, null=True)
permanent_state = models.CharField(max_length=50, blank=True, null=True)
permanent_pincode = models.IntegerField(null=True, blank=True)
current_address = models.TextField(null=True, blank=True)
current_city = models.CharField(max_length=50, blank=True, null=True)
current_state = models.CharField(max_length=50, blank=True, null=True)
current_pincode = models.IntegerField(null=True, blank=True)
service_status = models.IntegerField(default=0)
credit_limit = models.FloatField(default=50)
bank_account_number = models.CharField(max_length=18, null=True, blank=True)
ifs = models.ForeignKey(Ifs, on_delete=models.PROTECT, null=True, blank=True)
level = models.ForeignKey(
Level, on_delete=models.PROTECT, null=True, blank=True)
confirmed = models.BooleanField(default=False)
user = models.ForeignKey('authentication.User', on_delete=models.PROTECT)
salary_type = models.CharField(max_length=50, default="net")
agreed_with_terms_and_conditions = models.BooleanField(default=False)
daily_salary = models.FloatField(default=0)
balance = models.FloatField(default=0)
credited = models.FloatField(default=0)
debited = models.FloatField(default=0)
withdraw = models.FloatField(default=0)
fees = models.FloatField(default=0)
gst = models.FloatField(default=0)
work_days = JSONField(default=get_default_work_days)
work_timings = models.CharField(max_length=17, default="09:00 AM-06:00 PM")
department = models.ForeignKey(
Department, on_delete=models.PROTECT, null=True, blank=True)
designation = models.ForeignKey(
Designation, on_delete=models.PROTECT, null=True, blank=True)
employer = models.ForeignKey(
Employer, on_delete=models.PROTECT, blank=True, null=True)
beneficiary_id = models.CharField(null=True, blank=True, max_length=20)
is_beneficiary = models.BooleanField(default=False)
check_location = models.BooleanField(default=True)
wish_listing = models.BooleanField(default=False)
mail_enabled = models.BooleanField(default=True, null=False)
mail_bounced = models.IntegerField(default=0)
mail_token = models.CharField(max_length=40, null=True, blank=True)
unsubscribe_token = models.CharField(max_length=40, null=True, blank=True)
deleted_at = models.DateTimeField(null=True, blank=True)
digital_time_stamp = models.TextField(null=True, blank=True)
active = models.BooleanField(default=True)
REQUIRED_FIELDS = ['net_monthly_salary', 'user', 'company', 'employer']
def save(self, *args, **kwargs):
if not self.pk:
# create mail_token and unsubscribe_token of employee
self.mail_token = utilities.get_random_time_based_token()
self.unsubscribe_token = utilities.get_random_time_based_token()
super(Employee, self).save(*args, **kwargs)
def __str__(self):
return str(self.phone)
def get_daily_salary(self):
now = datetime.datetime.now()
year = now.year
month = now.month
num_days_in_current_month = calendar.monthrange(year, month)[1]
work_days_in_current_month = [
datetime.date(year, month, day).weekday() for day in range(1, num_days_in_current_month + 1)
if datetime.date(year, month, day).strftime('%a') in self.work_days.get('days')
]
num_work_days_in_current_month = len(work_days_in_current_month)
daily_salary = self.net_monthly_salary / num_work_days_in_current_month
daily_salary = (math.floor(daily_salary / 50)) * 50
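        # worked example (illustrative): 30000 net over 26 work days -> 1153.8,
        # floored to the nearest lower multiple of 50 -> 1150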
return daily_salary
# TODO: optimize (make a model field)
def get_available_balance(self):
attendance_queryset = Attendance.objects.filter(
employee=self.pk,
date__month=datetime.datetime.now().month,
date__year=datetime.datetime.now().year
)
total_verified_salary = sum([attendance.verified_salary for attendance in attendance_queryset])
transfer_up_to = (total_verified_salary * self.credit_limit) / 100
transfer_up_to = transfer_up_to - self.withdraw
return float(round(transfer_up_to))
def calculate_work_day(self, num_days):
now = datetime.datetime.now()
year = now.year
month = now.month
work_days = [
datetime.date(year, month, day).weekday() for day in range(1, num_days + 1)
if datetime.date(year, month, day).strftime('%a') in self.work_days.get('days')
]
return len(work_days)
DURATION_CHOICES = [
("full_day", "full_day"),
("half_day", "half_day"),
]
WORK_LOCATION_CHOICES = [
("office", "office"),
("home", "home"),
("other", "other"),
]
class Attendance(ModelFieldRequiredMixin, models.Model):
date = models.DateField(db_index=True, default=datetime.datetime.now)
status = models.CharField(
max_length=30, null=True, db_index=True)
duration = models.CharField(
max_length=10, null=True, blank=True, db_index=True, choices=DURATION_CHOICES)
start_at = models.TimeField(null=True, blank=True)
end_at = models.TimeField(null=True, blank=True)
work_location = models.CharField(
max_length=10, null=True, blank=True, choices=WORK_LOCATION_CHOICES)
qr_code_scanned = models.BooleanField(null=True, blank=True)
face_detected = models.BooleanField(null=True, blank=True)
employee = models.ForeignKey(Employee, on_delete=models.PROTECT)
company = models.ForeignKey(Company, on_delete=models.PROTECT)
salary = models.FloatField(default=0)
verified_salary = models.FloatField(default=0)
description = models.CharField(max_length=255, null=True, blank=True)
image = models.ImageField(upload_to='images/attendance/', null=True, blank=True)
REQUIRED_FIELDS = ['status', 'employee', 'company']
def __str__(self):
return str(self.date)
class Meta:
constraints = [
models.UniqueConstraint(
fields=['date', 'employee', 'company'],
name='make_unique_date_employee_company'
),
]
STATEMENT_STATUS_CHOICES = [
("initialized", "initialized"),
("pending", "pending"),
("waiting", "waiting"),
("rejected", "rejected"),
("approved", "approved"),
("cancelled", "cancelled"),
("completed", "completed"),
]
class Statement(ModelFieldRequiredMixin, models.Model):
rid = models.UUIDField(default=uuid.uuid4, editable=False)
date = models.DateField(auto_now_add=True, db_index=True)
status = models.CharField(max_length=15, choices=STATEMENT_STATUS_CHOICES, default="initialized")
description = models.CharField(max_length=255)
credit = models.FloatField(null=True)
debit = models.FloatField(null=True)
withdraw = models.FloatField(null=True, blank=True)
fees = models.FloatField(null=True, blank=True)
gst = models.FloatField(null=True, blank=True)
balance = models.FloatField(null=True, blank=True)
current_due = models.FloatField(null=True, blank=True)
previous_due = models.FloatField(null=True, blank=True)
interest = models.FloatField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True, db_index=True)
employee = models.ForeignKey(Employee, on_delete=models.PROTECT)
company = models.ForeignKey(Company, on_delete=models.PROTECT)
otp = models.CharField(max_length=6, null=True, blank=True)
otp_valid_till = models.DateTimeField(null=True, blank=True)
digital_time_stamp = models.TextField(null=True, blank=True)
REQUIRED_FIELDS = ['description', 'balance', 'employee', 'company']
def __str__(self):
return str(self.date)
class Meta:
constraints = [
models.CheckConstraint(
check=Q(debit__isnull=False) | Q(credit__isnull=False),
name='not_both_null'
)
]
BOOKING_STATUS_CHOICES = [
(0, 'open'),
(1, 'pending'),
(2, 'closed'),
]
CATEGORY_CHOICE = [
(1, 'employee'),
(2, 'employer'),
]
class Booking(models.Model):
name = models.CharField(max_length=255)
company = models.CharField(max_length=255, null=True, blank=True)
phone = models.CharField(max_length=10)
email = models.CharField(max_length=255, null=True, blank=True)
category = models.IntegerField(null=True, blank=True, choices=CATEGORY_CHOICE)
status = models.IntegerField(default=0, db_index=True, choices=BOOKING_STATUS_CHOICES)
created_at = models.DateTimeField(auto_now_add=True, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
def __str__(self):
return str(self.name)
class Setting(models.Model):
key = models.CharField(max_length=255, unique=True)
value = models.CharField(max_length=255)
def __str__(self):
return str(self.key)
class Pricing(models.Model):
company = models.ForeignKey(Company, on_delete=models.PROTECT, null=True, blank=True)
min_price = models.IntegerField(null=True, blank=True)
max_price = models.IntegerField()
fee = models.IntegerField()
def __str__(self):
return str(self.fee)
@staticmethod
def calculate_fees(amount, company_obj):
if amount is None or amount < 0:
return None
if company_obj.tie_up is False and amount == 0:
return None
if company_obj.tie_up is True and amount == 0:
price_obj = Pricing.objects.filter(
Q(company=company_obj) & Q(min_price=None) & Q(max_price=0)
).first()
fee = float(price_obj.fee) if price_obj else None
else:
price_obj = Pricing.objects.filter(
Q(company=company_obj) & Q(min_price__lt=amount) & Q(max_price__gte=amount)
).first()
fee = float(price_obj.fee) if price_obj else None
if fee and fee > 0:
gst = (fee * 18) / 100
fee = fee + gst
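            # e.g. (illustrative): a base fee of 100 becomes 118 after 18% GST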
return fee
class Verifier(models.Model):
employee = models.ManyToManyField(Employee)
email = models.EmailField()
counter = models.IntegerField(default=0)
def __str__(self):
return str(self.email)
DOMAIN_CHOICE = [
(0, 'generic'),
(1, 'company')
]
class Domain(models.Model):
name = models.CharField(max_length=30, unique=True, db_index=True)
category = models.IntegerField(null=True, blank=True, choices=DOMAIN_CHOICE)
company = models.ForeignKey(Company, on_delete=models.PROTECT, null=True, blank=True)
def __str__(self):
return str(self.name)
class Application(models.Model):
name = models.CharField(max_length=255, null=True, blank=True)
phone = models.CharField(max_length=255, null=True, blank=True)
company_email = models.CharField(max_length=255, null=True, blank=True)
employee_id = models.CharField(max_length=255, null=True, blank=True)
company_name = models.CharField(max_length=255, null=True, blank=True)
net_monthly_salary = models.CharField(max_length=255, null=True, blank=True)
salary_day = models.CharField(max_length=255, null=True, blank=True)
bank_name = models.CharField(max_length=255, null=True, blank=True)
bank_account_name = models.CharField(max_length=255, null=True, blank=True)
bank_account_number1 = models.CharField(max_length=255, null=True, blank=True)
bank_account_number2 = models.CharField(max_length=255, null=True, blank=True)
ifsc = models.CharField(max_length=255, null=True, blank=True)
utm_source = models.CharField(max_length=255, null=True, blank=True)
utm_medium = models.CharField(max_length=255, null=True, blank=True)
utm_campaign = models.CharField(max_length=255, null=True, blank=True)
deleted_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True, db_index=True, null=True, blank=True)
def __str__(self):
return str(self.name)
| UTF-8 | Python | false | false | 18,311 | py | 266 | models.py | 230 | 0.676861 | 0.665174 | 0 | 488 | 36.522541 | 104 |
RodenLuo/mummi-ras | 1,047,972,056,379 | 66f229e70ed33797b2555e5121601929b3e6dac5 | e48059f5f20b1b39893e6fb5fc8eccb4307be1bf | /mummi_ras/online/cg/baseFastSingleFrame.py | d36281d9779ba8477d3abbafe52898c7ba451eae | [
"MIT"
] | permissive | https://github.com/RodenLuo/mummi-ras | d99b48517398fd9211b0947ecc03715b37fca84a | e1524cf661be32ab97e00dd0424eda0338f3692b | refs/heads/main | 2023-09-01T23:14:44.622819 | 2021-11-16T07:13:45 | 2021-11-16T07:13:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Analysis building blocks --- :mod:`MDAnalysis.analysis.base`
============================================================
A collection of useful building blocks for creating Analysis
classes.
"""
import six
from six.moves import range, zip
import inspect
import numpy as np
from MDAnalysis import coordinates
from MDAnalysis.core.groups import AtomGroup
class AnalysisBase(object):
"""Base class for defining multi frame analysis
    This class is designed as a template for creating multiframe analyses.
This class will automatically take care of setting up the trajectory
reader for iterating, and it offers to show a progress meter.
    To define a new Analysis, `AnalysisBase` needs to be subclassed and
    `_single_frame` must be defined. It is also possible to define
`_prepare` and `_conclude` for pre and post processing. See the example
below.
.. code-block:: python
class NewAnalysis(AnalysisBase):
def __init__(self, atomgroup, parameter, **kwargs):
super(NewAnalysis, self).__init__(atomgroup.universe.trajectory,
**kwargs)
self._parameter = parameter
self._ag = atomgroup
def _prepare(self):
# OPTIONAL
# Called before iteration on the trajectory has begun.
# Data structures can be set up at this time
self.result = []
def _single_frame(self):
# REQUIRED
# Called after the trajectory is moved onto each new frame.
# store result of `some_function` for a single frame
self.result.append(some_function(self._ag, self._parameter))
def _conclude(self):
# OPTIONAL
# Called once iteration on the trajectory is finished.
# Apply normalisation and averaging to results here.
self.result = np.asarray(self.result) / np.sum(self.result)
Afterwards the new analysis can be run like this.
.. code-block:: python
na = NewAnalysis(u.select_atoms('name CA'), 35).run()
print(na.result)
"""
def __init__(self, trajectory, start=None,
stop=None, step=None, verbose=None, quiet=None):
"""
Parameters
----------
trajectory : mda.Reader
A trajectory Reader
start : int, optional
start frame of analysis
stop : int, optional
stop frame of analysis
step : int, optional
number of frames to skip between each analysed frame
verbose : bool, optional
Turn on verbosity
"""
        # @TODO - this needs to be fixed, _verbose is not supported in current MDAnalysis, also see below
#self._verbose = _set_verbose(verbose, quiet, default=False)
self._verbose = True
self._quiet = not self._verbose
self._setup_frames(trajectory, start, stop, step)
def _setup_frames(self, trajectory, start=None, stop=None, step=None):
"""
Pass a Reader object and define the desired iteration pattern
through the trajectory
Parameters
----------
trajectory : mda.Reader
A trajectory Reader
start : int, optional
start frame of analysis
stop : int, optional
stop frame of analysis
step : int, optional
number of frames to skip between each analysed frame
"""
self._trajectory = trajectory
start, stop, step = trajectory.check_slice_indices(start, stop, step)
self.start = start
self.stop = stop
self.step = step
self.n_frames = len(list(range(start, stop, step)))
interval = int(self.n_frames // 100)
if interval == 0:
interval = 1
# ensure _verbose is set when __init__ wasn't called, this is to not
# break pre 0.16.0 API usage of AnalysisBase
if not hasattr(self, '_verbose'):
            # @TODO - this needs to be fixed, _verbose is not supported in current MDAnalysis, so the pre-0.16 fallback below is commented out
'''
if hasattr(self, '_quiet'):
                # Here, we are in the odd case where a child class defined
                # self._quiet without going through AnalysisBase.__init__.
                warnings.warn("The *_quiet* attribute of analyses is "
                              "deprecated (from 0.16); use *_verbose* instead.",
DeprecationWarning)
self._verbose = not self._quiet
else:
self._verbose = True
self._quiet = not self._verbose
'''
#self._pm = ProgressBar(self.n_frames if self.n_frames else 1,
# interval=interval, verbose=self._verbose)
def _single_frame(self):
"""Calculate data from a single frame of trajectory
Don't worry about normalising, just deal with a single frame.
"""
raise NotImplementedError("Only implemented in child classes")
def _prepare(self):
"""Set things up before the analysis loop begins"""
pass
def _conclude(self):
"""Finalise the results you've gathered.
Called at the end of the run() method to finish everything up.
"""
pass
def run(self):
"""Perform the calculation"""
# logger.info("Starting preparation")
self._prepare()
# for i, ts in enumerate(
# self._trajectory[self.start:self.stop:self.step]):
# self._frame_index = i
# self._ts = ts
# logger.info("--> Doing frame {} of {}".format(i+1, self.n_frames))
# self._single_frame()
# self._pm.echo(self._frame_index)
# logger.info("Finishing up")
self._frame_index = 0
# this would work: "self._ts = self._trajectory[0]" but it takes too much time for the copy, so just use self.g1 in the method
self._single_frame()
#self._pm.echo(self._frame_index)
self._conclude()
return self
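# Note: unlike the stock MDAnalysis AnalysisBase, the run() above deliberately
# skips trajectory iteration and evaluates a single frame only (hence the
# "FastSingleFrame" module name). Using the NewAnalysis example from the class
# docstring, the call pattern stays the same:
#
#   na = NewAnalysis(u.select_atoms('name CA'), 35).run()
#   # runs _prepare(), one _single_frame() call, then _conclude()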
class AnalysisFromFunction(AnalysisBase):
"""
Create an analysis from a function working on AtomGroups
Attributes
----------
results : ndarray
results of calculation are stored after call to ``run``
Example
-------
>>> def rotation_matrix(mobile, ref):
>>> return mda.analysis.align.rotation_matrix(mobile, ref)[0]
>>> rot = AnalysisFromFunction(rotation_matrix, trajectory, mobile, ref).run()
>>> print(rot.results)
Raises
------
ValueError : if ``function`` has the same kwargs as ``BaseAnalysis``
"""
def __init__(self, function, trajectory=None, *args, **kwargs):
"""Parameters
----------
function : callable
function to evaluate at each frame
trajectory : mda.coordinates.Reader (optional)
trajectory to iterate over. If ``None`` the first AtomGroup found in
args and kwargs is used as a source for the trajectory.
*args : list
arguments for ``function``
**kwargs : dict
            arguments for ``function`` and ``AnalysisBase``
"""
if (trajectory is not None) and (not isinstance(
trajectory, coordinates.base.ProtoReader)):
args = args + (trajectory,)
trajectory = None
if trajectory is None:
for arg in args:
if isinstance(arg, AtomGroup):
trajectory = arg.universe.trajectory
# when we still didn't find anything
if trajectory is None:
for arg in six.itervalues(kwargs):
if isinstance(arg, AtomGroup):
trajectory = arg.universe.trajectory
if trajectory is None:
raise ValueError("Couldn't find a trajectory")
self.function = function
self.args = args
base_kwargs, self.kwargs = _filter_baseanalysis_kwargs(self.function,
kwargs)
super(AnalysisFromFunction, self).__init__(trajectory, **base_kwargs)
def _prepare(self):
self.results = []
def _single_frame(self):
self.results.append(self.function(*self.args, **self.kwargs))
def _conclude(self):
self.results = np.asarray(self.results)
def analysis_class(function):
"""
Transform a function operating on a single frame to an analysis class
For an usage in a library we recommend the following style:
>>> def rotation_matrix(mobile, ref):
>>> return mda.analysis.align.rotation_matrix(mobile, ref)[0]
>>> RotationMatrix = analysis_class(rotation_matrix)
It can also be used as a decorator:
>>> @analysis_class
>>> def RotationMatrix(mobile, ref):
>>> return mda.analysis.align.rotation_matrix(mobile, ref)[0]
>>> rot = RotationMatrix(u.trajectory, mobile, ref, step=2).run()
>>> print(rot.results)
"""
class WrapperClass(AnalysisFromFunction):
def __init__(self, trajectory=None, *args, **kwargs):
super(WrapperClass, self).__init__(function, trajectory,
*args, **kwargs)
return WrapperClass
def _filter_baseanalysis_kwargs(function, kwargs):
"""
create two dictionaries with kwargs separated for function and AnalysisBase
Parameters
----------
function : callable
function to be called
kwargs : dict
keyword argument dictionary
Returns
-------
base_args : dict
dictionary of AnalysisBase kwargs
kwargs : dict
kwargs without AnalysisBase kwargs
Raises
------
ValueError : if ``function`` has the same kwargs as ``BaseAnalysis``
"""
base_argspec = inspect.getargspec(AnalysisBase.__init__)
n_base_defaults = len(base_argspec.defaults)
base_kwargs = {name: val
for name, val in zip(base_argspec.args[-n_base_defaults:],
base_argspec.defaults)}
argspec = inspect.getargspec(function)
for base_kw in six.iterkeys(base_kwargs):
if base_kw in argspec.args:
raise ValueError(
"argument name '{}' clashes with AnalysisBase argument."
"Now allowed are: {}".format(base_kw, list(base_kwargs.keys())))
base_args = {}
for argname, default in six.iteritems(base_kwargs):
base_args[argname] = kwargs.pop(argname, default)
return base_args, kwargs
| UTF-8 | Python | false | false | 11,726 | py | 79 | baseFastSingleFrame.py | 64 | 0.5927 | 0.586389 | 0 | 331 | 34.425982 | 135 |
HaroldMills/Vesper | 592,705,504,951 | c93205961fcaed46459efe3c2550db8ca66858d3 | 085488720112922ff3aed15f99f3c93911425c4a | /vesper/ephem/tests/scripts/create_usno_csv_files.py | 6b78697ade38c6044ede0392d279b3687eaaa0d5 | [
"MIT"
] | permissive | https://github.com/HaroldMills/Vesper | 0b61d18bc241af22bfc251088fc87d72add6367b | ec92fe5231f54336499db189a3bbc6cb08a19e61 | refs/heads/master | 2023-07-05T22:45:27.316498 | 2023-07-04T11:58:14 | 2023-07-04T11:58:14 | 19,112,486 | 49 | 6 | MIT | false | 2023-02-14T16:09:19 | 2014-04-24T14:55:34 | 2022-12-26T00:18:32 | 2023-02-14T16:09:18 | 105,720 | 42 | 3 | 32 | Python | false | false | """
Script that creates CSV files from a directory of USNO tables.
The script assumes that every file in the input directory is either a
rise/set table or an altitude/azimuth table. It creates two CSV files,
one named "USNO Rise Set Data.csv" and the other named
"USNO Altitude Azimuth Data.csv".
"""
import csv
import datetime
import os
import vesper.ephem.usno_table_utils as utils
_DATA_DIR_PATH = r'C:\Users\Harold\Desktop\NFC\Data'
_TABLES_DIR_PATH = os.path.join(_DATA_DIR_PATH, 'USNO Tables Test')
_RS_CSV_FILE_PATH = os.path.join(_DATA_DIR_PATH, 'USNO Rise Set Data.csv')
_AA_CSV_FILE_PATH = os.path.join(
_DATA_DIR_PATH, 'USNO Altitude Azimuth Data.csv')
_RS_COLUMN_NAMES = (
'Latitude', 'Longitude', 'Local Date', 'Event', 'UTC Time')
_AA_COLUMN_NAMES = (
'Latitude', 'Longitude', 'UTC Time', 'Body', 'Altitude', 'Azimuth',
'Illumination')
_RS_TABLE_TYPES = frozenset(utils.RISE_SET_TABLE_TYPES)
_AA_TABLE_TYPES = frozenset(utils.ALTITUDE_AZIMUTH_TABLE_TYPES)
def _main():
rs_file, rs_writer = _open_csv_file(_RS_CSV_FILE_PATH, _RS_COLUMN_NAMES)
aa_file, aa_writer = _open_csv_file(_AA_CSV_FILE_PATH, _AA_COLUMN_NAMES)
for (dir_path, _, file_names) in os.walk(_TABLES_DIR_PATH):
for file_name in file_names:
table = _create_table(dir_path, file_name)
if table.type in _RS_TABLE_TYPES:
_append_rs_table_data(table, rs_writer)
else:
_append_aa_table_data(table, aa_writer)
print(file_name, table.type)
rs_file.close()
aa_file.close()
def _open_csv_file(file_path, column_names):
file_ = open(file_path, 'w', newline='')
writer = csv.writer(file_)
writer.writerow(column_names)
return (file_, writer)
def _create_table(dir_path, file_name):
file_name_table_type = file_name.split('_')[0]
table_type = utils.get_table_type(file_name_table_type)
table_class = utils.get_table_class(table_type)
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'r') as file_:
table_text = file_.read()
return table_class(table_text)
_RISE_EVENTS = {
'Sunrise/Sunset': 'Sunrise',
'Moonrise/Moonset': 'Moonrise',
'Civil Twilight': 'Civil Dawn',
'Nautical Twilight': 'Nautical Dawn',
'Astronomical Twilight': 'Astronomical Dawn'
}
_SET_EVENTS = {
'Sunrise/Sunset': 'Sunset',
'Moonrise/Moonset': 'Moonset',
'Civil Twilight': 'Civil Dusk',
'Nautical Twilight': 'Nautical Dusk',
'Astronomical Twilight': 'Astronomical Dusk'
}
def _append_rs_table_data(table, writer):
rows = []
lat = table.lat
lon = table.lon
event = _RISE_EVENTS[table.type]
_append_rows(rows, lat, lon, event, table.rising_times)
event = _SET_EVENTS[table.type]
_append_rows(rows, lat, lon, event, table.setting_times)
writer.writerows(rows)
def _append_rows(rows, lat, lon, event, times):
for dt in times:
local_dt = _get_naive_local_time(dt, lon)
date = local_dt.date()
time = dt.strftime('%Y-%m-%d %H:%M')
rows.append((lat, lon, date, event, time))
def _get_naive_local_time(time, lon):
naive_time = time.replace(tzinfo=None)
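    # The Earth rotates 360 degrees in 24 hours, so each 15 degrees of
    # longitude corresponds to a one-hour offset of local mean solar time.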
utc_offset = datetime.timedelta(hours=lon * 24. / 360.)
return naive_time + utc_offset
def _append_aa_table_data(table, writer):
rows = []
lat = table.lat
lon = table.lon
body = table.body
for data in table.data:
if table.body == 'Sun':
(time, alt, az) = data
illumination = None
else:
(time, alt, az, illumination) = data
time = time.strftime('%Y-%m-%d %H:%M')
rows.append((lat, lon, time, body, alt, az, illumination))
writer.writerows(rows)
if __name__ == '__main__':
_main()
| UTF-8 | Python | false | false | 3,987 | py | 540 | create_usno_csv_files.py | 446 | 0.600702 | 0.599197 | 0 | 145 | 26.468966 | 76 |
FLHCoLtd/hassio-ferqo-cc | 14,731,737,862,513 | ed152fb8573920c9ed40cfaab341c8af835c57f8 | 2dd568a0cf5a5aff965c2526c3488f4c148313f4 | /scene.py | 3b3c4fbf1f6482ffafd083c31ef9f227ffd96285 | [] | no_license | https://github.com/FLHCoLtd/hassio-ferqo-cc | d44fa423a5c6a77abbdf98b92016ad2deadb4e2c | e1049be281551a503db90b0a431e585162c76a21 | refs/heads/main | 2023-04-09T17:11:29.885023 | 2021-04-14T01:40:53 | 2021-04-14T01:40:53 | 357,737,896 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from homeassistant.helpers.entity import Entity
import logging
from .const import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Add the Sonoff Sensor entities"""
# for device in hass.data[DOMAIN].get_devices(force_update = False):
    # As far as I know, only 1-switch devices seem to have sensor-like capabilities
async def loadingentities():
entities = []
for device in hass.data[DOMAIN].getEntitiesType("s"):
CC_device = FerqoCCSensor(device, hass)
entities.append(CC_device)
return entities
entities = await loadingentities()
if len(entities):
async_add_entities(entities, update_before_add=False)
return True
class FerqoCCSensor(Entity):
"""Representation of a sensor."""
def __init__(self, CC_device, hass):
self.hub = hass.data[DOMAIN]
self.CC_device = CC_device
self.sensorType = CC_device["sensorType"]
self._state = CC_device[str(CC_device["sensorType"])]
self.unit = CC_device["sensorUnit"]
self._name = "Ferqo." + CC_device["name"]
self.node_id = CC_device["node_id"]
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self.unit
async def async_update(self):
"""Retrieve latest state."""
List = self.hub.getEntitiesType("sensor")
self.CC_List = List
for i in range(len(self.CC_List)):
if (self.node_id == self.CC_List[i]["node_id"]):
if (self.sensorType == self.CC_List[i]["sensorType"]):
self._state = self.CC_List[i][str(self.CC_List[i]["sensorType"])]
self._name = "Ferqo." + self.CC_List[i]["name"]
| UTF-8 | Python | false | false | 2,001 | py | 20 | scene.py | 19 | 0.606197 | 0.605697 | 0 | 55 | 35.363636 | 86 |
mcneel/compute.rhino3d | 876,173,378,174 | 9f13c9a93c8c0459ac12b7052638689201e86c75 | 91d5349ef6a8259ba0a551e70e37cb29d6817652 | /src/ghhops-server-py/ghhops_server/params.py | 02196b3b5ef5eaa57cd0a9efab2e8e90a37d26e2 | [
"MIT"
] | permissive | https://github.com/mcneel/compute.rhino3d | 641dc3e88f53d892f6b75ce14924a752ff949e5b | 0acf93ae9aa520fbbfa64ee97f77088a9005f3d4 | refs/heads/7.x | 2023-08-28T03:41:04.618336 | 2023-08-23T18:11:47 | 2023-08-23T18:11:47 | 119,090,587 | 242 | 184 | NOASSERTION | false | 2023-08-23T18:11:49 | 2018-01-26T18:53:54 | 2023-08-05T19:45:28 | 2023-08-23T18:11:48 | 3,973 | 245 | 157 | 73 | C# | false | false | """Hops Component Parameter wrappers"""
import json
from enum import Enum
import inspect
from ghhops_server.base import _HopsEncoder
from ghhops_server.logger import hlogger
__all__ = (
"HopsParamAccess",
# "HopsArc",
"HopsBoolean",
# "HopsBox",
"HopsBrep",
"HopsCircle",
# "HopsColour",
# "HopsComplex"
# "HopsCulture",
"HopsCurve",
# "HopsField",
# "HopsFilePath",
# "HopsGeometry",
"HopsInteger",
# "HopsInterval",
# "HopsInterval2D"
"HopsLine",
# "HopsMatrix",
"HopsMesh",
# "HopsMeshFace",
"HopsNumber",
"HopsPlane",
"HopsPoint",
# "HopsRectangle",
"HopsString",
# "HopsStructurePath",
"HopsSubD",
"HopsSurface",
# "HopsTime",
# "HopsTransform",
"HopsVector",
)
RHINO = None
RHINO_FROMJSON = None
RHINO_TOJSON = None
RHINO_GEOM = None
CONVERT_VALUE = None
def _init_rhinoinside():
global RHINO
global RHINO_FROMJSON
global RHINO_TOJSON
global RHINO_GEOM
global CONVERT_VALUE
# initialize with Rhino.Inside Cpython ==========
import clr
clr.AddReference("System.Collections")
clr.AddReference("Newtonsoft.Json.Rhino")
import System
import Newtonsoft.Json as NJ
from System.Collections.Generic import Dictionary
def from_json(json_obj):
"""Convert to RhinoCommon from json"""
data_dict = Dictionary[str, str]()
for k, v in json_obj.items():
data_dict[k] = str(v)
return RHINO.Runtime.CommonObject.FromJSON(data_dict)
def to_json(value):
"""Convert RhinoCommon object to json"""
return NJ.JsonConvert.SerializeObject(value)
def convert_value(value):
# FIXME: more value types probably need to be handled
if isinstance(value, bool):
return System.Boolean(value)
elif isinstance(value, int):
return System.Int32(value)
elif isinstance(value, float):
return System.Double(value)
elif isinstance(value, str):
return System.String(value)
return value
RHINO_FROMJSON = from_json
RHINO_TOJSON = to_json
import Rhino
RHINO = Rhino
RHINO_GEOM = Rhino.Geometry
CONVERT_VALUE = convert_value
def _init_rhino3dm():
global RHINO
global RHINO_FROMJSON
global RHINO_TOJSON
global RHINO_GEOM
global CONVERT_VALUE
import rhino3dm
def from_json(json_obj):
"""Convert to rhino3dm from json"""
return rhino3dm.CommonObject.Decode(json_obj)
def to_json(value):
"""Convert rhino3dm object to json"""
return json.dumps(value, cls=_HopsEncoder)
def convert_value(value):
return value
RHINO_FROMJSON = from_json
RHINO_TOJSON = to_json
RHINO_GEOM = rhino3dm
CONVERT_VALUE = convert_value
class HopsParamAccess(Enum):
"""GH Item Access"""
ITEM = 0
LIST = 1
TREE = 2
# TODO:
# - params can have icons too
# - cast methods
class _GHParam:
coercers = []
param_type = None
result_type = None
def __init__(
self,
name,
nickname=None,
desc=None,
access: HopsParamAccess = HopsParamAccess.ITEM,
optional=False,
default=None,
):
self.name = name
self.nickname = nickname
self.description = desc
self.access: HopsParamAccess = access or HopsParamAccess.ITEM
self.optional = optional
self.default = default or inspect.Parameter.empty
def _coerce_value(self, param_type, param_data):
# get data as dict
data = json.loads(param_data)
# parse data
if isinstance(self.coercers, dict):
coercer = self.coercers.get(param_type, None)
if coercer:
return coercer(data)
elif param_type.startswith("Rhino.Geometry."):
return RHINO_FROMJSON(data)
return param_data
def encode(self):
"""Parameter serializer"""
param_def = {
"Name": self.name,
"Nickname": self.nickname,
"Description": self.description,
"ParamType": self.param_type,
"ResultType": self.result_type,
"AtLeast": 1,
}
if HopsParamAccess.ITEM == self.access:
param_def["AtMost"] = 1
if HopsParamAccess.LIST == self.access:
param_def["AtMost"] = 2147483647 # Max 32 bit integer value
if HopsParamAccess.TREE == self.access:
param_def["AtLeast"] = -1
param_def["AtMost"] = -1
if self.default != inspect.Parameter.empty:
param_def["Default"] = self.default
return param_def
def from_input(self, input_data):
"""Extract parameter data from serialized input"""
if self.access == HopsParamAccess.TREE:
paths = input_data["InnerTree"]
tree = {}
for k, v in paths.items():
data = []
for param_value_item in v:
param_type = param_value_item["type"]
param_value = param_value_item["data"]
data.append(self._coerce_value(param_type, param_value))
tree[k] = data
return tree
data = []
for param_value_item in input_data["InnerTree"]["{0}"]:
param_type = param_value_item["type"]
param_value = param_value_item["data"]
data.append(self._coerce_value(param_type, param_value))
if self.access == HopsParamAccess.ITEM:
return data[0]
return data
def from_result(self, value):
"""Serialize parameter with given value for output"""
if self.access == HopsParamAccess.TREE and isinstance(value, dict):
tree = {}
for key in value.keys():
branch_data = [
{
"type": self.result_type,
"data": RHINO_TOJSON(CONVERT_VALUE(v)),
}
for v in value[key]
]
tree[key] = branch_data
output = {
"ParamName": self.name,
"InnerTree": tree,
}
return output
if not isinstance(value, tuple) and not isinstance(value, list):
value = (value,)
output_list = [
{"type": self.result_type, "data": RHINO_TOJSON(CONVERT_VALUE(v))}
for v in value
]
output = {
"ParamName": self.name,
"InnerTree": {"0": output_list},
}
return output
class HopsBoolean(_GHParam):
"""Wrapper for GH_Boolean"""
param_type = "Boolean"
result_type = "System.Boolean"
coercers = {"System.Boolean": lambda b: bool(b)}
class HopsBrep(_GHParam):
"""Wrapper for GH Brep"""
param_type = "Brep"
result_type = "Rhino.Geometry.Brep"
class HopsCircle(_GHParam):
"""Wrapper for GH_Circle"""
param_type = "Circle"
result_type = "Rhino.Geometry.Circle"
coercers = {
"Rhino.Geometry.Circle": lambda d: HopsCircle._make_circle(
HopsPlane._make_plane(
d["Plane"]["Origin"], d["Plane"]["XAxis"], d["Plane"]["YAxis"]
),
d["Radius"],
)
}
@staticmethod
def _make_circle(p, r):
circle = RHINO_GEOM.Circle(r)
circle.Plane = p
return circle
class HopsCurve(_GHParam):
"""Wrapper for GH Curve"""
param_type = "Curve"
result_type = "Rhino.Geometry.Curve"
class HopsInteger(_GHParam):
"""Wrapper for GH_Integer"""
param_type = "Integer"
result_type = "System.Int32"
coercers = {"System.Int32": lambda i: int(i)}
class HopsLine(_GHParam):
"""Wrapper for GH_Line"""
param_type = "Line"
result_type = "Rhino.Geometry.Line"
coercers = {
"Rhino.Geometry.Line": lambda l: RHINO_GEOM.Line(
HopsLine._make_point(l["From"]), HopsLine._make_point(l["To"])
)
}
@staticmethod
def _make_point(a):
return RHINO_GEOM.Point3d(a["X"], a["Y"], a["Z"])
class HopsMesh(_GHParam):
"""Wrapper for GH Mesh"""
param_type = "Mesh"
result_type = "Rhino.Geometry.Mesh"
class HopsNumber(_GHParam):
"""Wrapper for GH Number"""
param_type = "Number"
result_type = "System.Double"
coercers = {
"System.Double": lambda d: float(d),
}
class HopsPlane(_GHParam):
"""Wrapper for GH_Plane"""
param_type = "Plane"
result_type = "Rhino.Geometry.Plane"
coercers = {
"Rhino.Geometry.Plane": lambda p: HopsPlane._make_plane(
p["Origin"], p["XAxis"], p["YAxis"]
)
}
@staticmethod
def _make_plane(o, x, y):
return RHINO_GEOM.Plane(
RHINO_GEOM.Point3d(o["X"], o["Y"], o["Z"]),
RHINO_GEOM.Vector3d(x["X"], x["Y"], x["Z"]),
RHINO_GEOM.Vector3d(y["X"], y["Y"], y["Z"]),
)
class HopsPoint(_GHParam):
"""Wrapper for GH Point"""
param_type = "Point"
result_type = "Rhino.Geometry.Point3d"
coercers = {
"Rhino.Geometry.Point2d": lambda d: RHINO_GEOM.Point2d(d["X"], d["Y"]),
"Rhino.Geometry.Point3d": lambda d: RHINO_GEOM.Point3d(
d["X"], d["Y"], d["Z"]
),
"Rhino.Geometry.Vector3d": lambda d: RHINO_GEOM.Vector3d(
d["X"], d["Y"], d["Z"]
),
}
class HopsString(_GHParam):
"""Wrapper for GH_String"""
param_type = "Text"
result_type = "System.String"
coercers = {"System.String": lambda s: s}
class HopsSubD(_GHParam):
"""Wrapper for GH SubD"""
param_type = "SubD"
result_type = "Rhino.Geometry.SubD"
class HopsSurface(_GHParam):
"""Wrapper for GH Surface"""
param_type = "Surface"
result_type = "Rhino.Geometry.Brep"
class HopsVector(_GHParam):
"""Wrapper for GH Vector"""
param_type = "Vector"
result_type = "Rhino.Geometry.Vector3d"
coercers = {
"Rhino.Geometry.Point2d": lambda d: RHINO_GEOM.Point2d(d["X"], d["Y"]),
"Rhino.Geometry.Point3d": lambda d: RHINO_GEOM.Point3d(
d["X"], d["Y"], d["Z"]
),
"Rhino.Geometry.Vector3d": lambda d: RHINO_GEOM.Vector3d(
d["X"], d["Y"], d["Z"]
),
}
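# Illustrative usage (a sketch following the ghhops-server README; the Flask
# app, route name, and function below are assumptions for illustration, not
# part of this module):
#
#   import ghhops_server as hs
#   from flask import Flask
#
#   app = Flask(__name__)
#   hops = hs.Hops(app)
#
#   @hops.component(
#       "/binmult",
#       inputs=[hs.HopsNumber("A"), hs.HopsNumber("B")],
#       outputs=[hs.HopsNumber("Multiply")],
#   )
#   def binary_multiply(a, b):
#       return a * b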
| UTF-8 | Python | false | false | 10,426 | py | 100 | params.py | 12 | 0.56244 | 0.557357 | 0 | 416 | 24.0625 | 79 |
jdariasl/prueba | 2,473,901,198,981 | a92bebe8d0013a6b55c9cbc992b33ddfff4d5f91 | 20ac3af659da6f12d359fd889fd0e80565d2219f | /_build/jupyter_execute/content/Clase 08 - Modelos de Mezclas de Gausianas.py | 7983439718e61f33c238a7cb7d69c98f1914051f | [] | no_license | https://github.com/jdariasl/prueba | a1b07a64f71325bd1653d0be839f7e265451a0b6 | c920bf3ca11dc4ddaebed37d4dc5fa038b614a61 | refs/heads/master | 2022-12-23T12:39:13.729297 | 2020-10-03T02:14:17 | 2020-10-03T02:14:17 | 300,769,714 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Gaussian Mixture Models
# ### Julián D. Arias Londoño
#
# Associate Professor
# Department of Systems Engineering
# Universidad de Antioquia, Medellín, Colombia
# julian.ariasl@udea.edu.co
# So far we have seen that there are two possible approaches to the classification problem:
# <li>Find a function that represents the decision boundary between the two classes</li>
# <li>Split the samples by class and estimate a probability density function (pdf) for each class</li>
# Models that follow the first approach are known as <b>discriminative</b>, because the boundary is fitted using the samples of both classes at the same time, and the fitting criterion is directly tied to reducing the classification error.
# Models that follow the second approach are known as <b>generative</b>, because they focus mainly on correctly estimating the pdf of the samples of each class (for instance, by maximizing the likelihood of the data under the model) and not necessarily on minimizing the classification error. Once a probability density model is available, it can be used to "generate" new samples, i.e., the pdf can be sampled to obtain new samples from the same distribution, which is why these models are called generative.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import numpy as np
import math
import matplotlib.pyplot as plt
from pylab import *
# Suppose we have a classification problem in which the samples are distributed as follows:
# In[3]:
x1 = np.random.rand(2,50)
x2 = np.random.rand(2,50) + 2
x3 = np.random.rand(2,50) + np.tile([[-1],[2]], (1, 50)) # np.tile is equivalent to MATLAB's repmat
x4 = np.random.rand(2,50) + np.tile([[3],[1]], (1, 50))
XC1 = np.concatenate((x1,x3),axis=1)
XC2 = np.concatenate((x2,x4),axis=1)
plt.title('Feature space', fontsize=14)
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.scatter(XC1[0,:], XC1[1,:])
plt.scatter(XC2[0,:], XC2[1,:],color='red')
# If we want to use a classifier based on the pdf of each class and, for simplicity, we choose a Gaussian discriminant classifier, i.e., we fit one Gaussian density function to each class, the result is the following:
# In[4]:
from matplotlib.patches import Ellipse
def plot_ellipse(ax, mu, sigma):
vals, vecs = np.linalg.eigh(sigma)
x , y = vecs[:, 0]
theta = np.degrees(np.arctan2(y,x))
w,h = 4* np.sqrt(vals)
ax.tick_params(axis='both',which='major',labelsize=20)
ellipse = Ellipse(mu,w,h,theta,color='k')
ellipse.set_alpha(0.5)
ax.add_artist(ellipse)
# In[20]:
fig, ax = plt.subplots(figsize=(5,5))
ax.set_title('Feature space', fontsize=14)
ax.set_xlabel('Feature 1')
ax.set_ylabel('Feature 2')
ax.scatter(XC1[0,:], XC1[1,:])
ax.scatter(XC2[0,:], XC2[1,:],color='red')
ax.set_ylim(-1, 4)
ax.set_xlim(-1.5, 5)
plot_ellipse(ax, np.mean(XC1, axis=1) ,np.cov(XC1))
plot_ellipse(ax, np.mean(XC2, axis=1) ,np.cov(XC2))
# In the figure above, each ellipse represents the pdf obtained for one of the classes. The center of an ellipse is its mean, and its contour corresponds to two standard deviations in each direction. As the figure shows, the models fit very poorly because the samples of each class are not grouped in a single cluster. In fact, each class is itself divided into several groups, and what we need is a model that can represent those different groups correctly.
# In[22]:
fig, ax = plt.subplots(figsize=(5,5))
ax.set_title('Feature space', fontsize=14)
ax.set_xlabel('Feature 1')
ax.set_ylabel('Feature 2')
ax.scatter(XC1[0,:], XC1[1,:])
ax.scatter(XC2[0,:], XC2[1,:],color='red')
ax.set_ylim(-1, 4)
ax.set_xlim(-1.5, 5)
plot_ellipse(ax, np.mean(x1, axis=1) ,np.cov(x1))
plot_ellipse(ax, np.mean(x2, axis=1) ,np.cov(x2))
plot_ellipse(ax, np.mean(x3, axis=1) ,np.cov(x3))
plot_ellipse(ax, np.mean(x4, axis=1) ,np.cov(x4))
# Each cluster would then be represented by a mean vector ${\bf{\mu}}_{ij}$ (class $i$, cluster $j$) and a covariance matrix $\Sigma _{ij}$. At this point, however, several questions arise that we must answer:
# <li> What form would the probability density function of a whole class take? </li>
# <li> Into how many clusters could the samples be grouped? </li>
# <li> How do we decide which samples to use to estimate the mean and covariance of the first cluster, and which ones for the second? </li>
# <li> How do we choose the number of clusters when the samples cannot be visualized because there are more than 3 features? </li>
# This type of model is known as a <b>Gaussian Mixture Model (GMM)</b>, and its general form is given by the following function:
# $$p({\bf{x}}|\Theta_i) = \sum_{j=1}^{M} \omega_{ij} \mathcal{N}({\bf{x}}|\mu_{ij},\Sigma_{ij})$$
# In[98]:
def GaussProb(X,medias,covars,pesos):
M = len(covars)
N,d = X.shape
Pprob = np.zeros(N).reshape(N,1)
precision = []
for i in range(M):
precision.append(np.linalg.inv(covars[i]))
for j in range(N):
prob = 0
for i in range(M):
tem = (X[j,:]-medias[i])
tem1 = np.dot(np.dot(tem,precision[i]),tem.T)
            # normalizing constant of a d-dimensional Gaussian: (2*pi)^(d/2) * |Sigma|^(1/2)
            tem2 = 1/(((2*math.pi)**(d/2))*(np.linalg.det(covars[i]))**(0.5))
prob+=pesos[i]*tem2*np.exp(-0.5*tem1)
Pprob[j] = prob
return Pprob
# where $M$ is the number of clusters into which the samples are divided, and $\omega_{ij}$ are the weights assigned to each cluster, i.e., they indicate how representative each cluster is within the complete distribution of a class; they must satisfy the constraint $\sum_{j=1}^{M} \omega_{ij} = 1$, that is, the weights of the GMM of a class must add up to 1.
# The number of clusters ($M$) into which a class is subdivided is a hyperparameter of the model that must be tuned. From the figures above it is easy to see that both classes are divided into two clusters; however, in the vast majority of cases the number of available features is much larger than 3, so the value of $M$ cannot be chosen visually. The usual approach is to use a cross-validation procedure to find the best value of $M$, similar to what must be done to find the best value of $K$ in the K-nearest-neighbors model.
# The learning problem in this case consists of estimating the parameter set $\Theta$ for each class, given a training set $\mathcal{D} = \left\lbrace \left( {\bf{x}}_k, y_k \right) \right\rbrace _{k=1} ^{N}$. Of the total number of training samples, $N_i$ belong to class $i$, i.e. $\sum_{i=1}^{\mathcal{C}} N_i = N$, where $\mathcal{C}$ is the number of classes in the training set ($y_k$ can take the values $1,2,...,\mathcal{C}$); the model of each class is therefore fitted using only the $N_i$ samples that belong to that class.
# Since training a GMM amounts to fitting a pdf, the maximum likelihood criterion is once again useful. Assuming that the training samples of class $i$ are i.i.d., the learning problem can be written as:
# $$\mathop {\max }\limits_\Theta \log \prod\limits_{k = 1}^{N_i} {p\left( {{\bf{x}}_k |\Theta_i } \right)}$$
# substituting the general form of the GMM for class $i$:
# $$ = \mathop {\max }\limits_\Theta \log \prod\limits_{k = 1}^{N_i} {\sum\limits_{j = 1}^M {w_{ij}{\mathcal N}\left( {{\bf{x}}_k|\mu _{ij} ,\Sigma _{ij} } \right)} }$$
# $$= \mathop {\max }\limits_\Theta \sum\limits_{k = 1}^{N_i} \log {\sum\limits_{j = 1}^M {w_{ij}
# {\mathcal N}\left( {{\bf{x}}_k|\mu _{ij} ,\Sigma _{ij} } \right)} }$$
# To find the parameters that maximize the likelihood function we must differentiate with respect to each of them and set the result to zero. Differentiating with respect to $\mu_{il}$ we have:
# $$ 0 = - \sum_{k=1}^{N_i} \frac{{w_{il}}\mathcal{N}({\bf{x}}_k|\mu _{il} ,\Sigma _{il})}{\sum _{j} w _{ij} \mathcal{N}({\bf{x}}_k|\mu _{ij} ,\Sigma _{ij})} \Sigma _{il}({\bf{x}}_k- \mu _{il})$$
# If we look closely at the term
# $$\gamma_{kl} = \frac{{w_{il}}\mathcal{N}({\bf{x}}_k|\mu _{il} ,\Sigma _{il})}{\sum _{j} w _{ij} \mathcal{N}({\bf{x}}_k|\mu _{ij} ,\Sigma _{ij})}$$
# it measures the probability that sample ${\bf{x}}_k$ was generated by cluster $l$ within the class. $\gamma _{kl}$ is also known as the responsibility of component $l$ in "explaining" the observation of sample ${\bf{x}}_k$.
# Rearranging the derivative of the likelihood function obtained above, we get:
#
# $$ \hat \mu _{il} = \frac{1}{n_l} \sum_{k=1}^{N_i} \gamma _{kl} {\bf{x}}_k \;\;\;(*)$$
# where $n_l = \sum_{k=1}^{N_i} \gamma _{kl}$
# Since $\gamma _{kl}$ indicates the "weight" that component $l$ of the model has in generating sample $k$, $n_l$ can be understood as the total weight of component $l$ in the model (it is a sum over all training samples), or as the total number of points assigned to cluster $l$.
# Differentiating with respect to $\Sigma_{il}$ in a similar way, we obtain:
# $$ \hat \Sigma_{il} = \frac{1}{n_l} \sum_{k=1}^{N_i} \gamma _{kl}({\bf{x}}_k - \mu _{il}) ({\bf{x}}_k - \mu _{il})^{T} \;\;\; (* *)$$
# which is equivalent to the estimate of the covariance matrix in the single-component case, but weighting each sample by the responsibility of the cluster under analysis.
# Finally, the estimation of $w_{ij}$ proceeds as in the two previous cases, but taking into account that the weights $w$ must satisfy the stochastic constraint. The function to maximize in this case is:
# $$\mathop {\max }\limits_\Theta \sum\limits_{k = 1}^{N_i} \log {\sum\limits_{j = 1}^M {w_{ij}
# {\mathcal N}\left( {{\bf{x}}_k|\mu _{ij} ,\Sigma _{ij} } \right)} } + \lambda \left(\sum _{j=1}^{M} w _{ij} - 1\right)$$
# where $\lambda$ is a Lagrange multiplier. Differentiating and setting the result to zero gives:
# $$ 0 = \sum_{k=1}^{N_i} \frac{\mathcal{N}({\bf{x}}_k|\mu _{il} ,\Sigma _{il})}{\sum _{j} w _{ij} \mathcal{N}({\bf{x}}_k|\mu _{ij} ,\Sigma _{ij})} + \lambda$$
# To find the value of $\lambda$ we can multiply both sides of the previous equation by $w_{il}$
# $$w_{il}\lambda = -\sum_{k=1}^{N_i} \frac{w_{il} \mathcal{N}({\bf{x}}_k|\mu _{il} ,\Sigma _{il})}{\sum _{j} w _{ij} \mathcal{N}({\bf{x}}_k|\mu _{ij} ,\Sigma _{ij})}$$
# and summing both sides over $l$, we easily find that $\lambda = -N_i$. Substituting this value of $\lambda$ into the previous equation, we obtain:
# $$\hat w_{il} = \frac{n_l}{n_i} \;\; (** *) $$
# It is important to stress that the equations marked with $(*)$ do not constitute a closed-form solution for the model parameters, because they all depend on $\gamma_{kl}$, which in turn depends, in a complex way, on the value of every parameter. Nevertheless, this result provides a simple iterative scheme for finding a solution to the maximum likelihood problem. The algorithm that implements this solution is known as the <b>Expectation-Maximization (EM) algorithm</b>. Its steps are:
# <li> Give an initial value to each of the model parameters </li>
# <li> E step: compute the value of $\gamma_{kl}$; note that $\gamma$ is actually a matrix with as many rows as there are samples $N_i$ and as many columns as there are clusters $M$. </li>
# <li> M step: use the value of $\gamma$ to find new values of the model parameters with equations $(*)$. </li>
# <li> Repeat the E and M steps until convergence. </li>
# The EM algorithm does not guarantee convergence to a global maximum, but it does guarantee that in every iteration (each repetition of the E and M steps) the likelihood of the model grows or stays the same, and never decreases.
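# The E and M steps above translate almost line by line into NumPy. The sketch
# below is a minimal illustration (it is not part of the original course code)
# and assumes a one-dimensional data array `x`:

# In[ ]:


import numpy as np

def em_gmm_1d(x, n_components, n_iter=100):
    # Initialization: means drawn from the data, unit variances, uniform weights
    rng = np.random.default_rng(0)
    mu = rng.choice(x, n_components, replace=False)
    var = np.ones(n_components)
    w = np.ones(n_components) / n_components
    for _ in range(n_iter):
        # E step: responsibilities gamma[k, l] proportional to w_l * N(x_k | mu_l, var_l)
        gamma = w * np.exp(-0.5 * (x[:, None] - mu) ** 2 / var) / np.sqrt(2 * np.pi * var)
        gamma /= gamma.sum(axis=1, keepdims=True)
        # M step: closed-form updates, equations (*), (**) and (***)
        n_l = gamma.sum(axis=0)
        mu = (gamma * x[:, None]).sum(axis=0) / n_l
        var = (gamma * (x[:, None] - mu) ** 2).sum(axis=0) / n_l
        w = n_l / x.shape[0]
    return w, mu, var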
# Let's look at an example. Below we generate a set of one-dimensional random values and plot their histogram to get a visual idea of the shape of the data's probability distribution:
# In[5]:
from time import sleep
from numpy import *
from matplotlib.pylab import *
x1 = np.random.normal(0, 2, 1000)
x2 = np.random.normal(20, 4, 1000)
x3 = np.random.normal(-20, 6, 1000)
X = np.concatenate((x1,x2,x3),axis=0)
Y = np.array(X)[np.newaxis]
Y = Y.T
hist(X,41, (-50,50))
show()
# We apply the EM algorithm to the dataset above and look at the result of the algorithm at different numbers of iterations.
# In[7]:
get_ipython().run_line_magic('matplotlib', 'notebook')
import time
from sklearn.mixture import GaussianMixture
xplt = np.linspace(-40, 40, 200)
x1plt = np.array(xplt).reshape(200,1)
fig, ax = plt.subplots(1,1)
gmm = GaussianMixture(n_components=3, covariance_type='full', max_iter=1, verbose=0, verbose_interval=10, means_init=np.array([0,4,10]).reshape(3,1))
gmm.fit(Y)
logprob = np.exp(gmm.score_samples(x1plt))
line1, = ax.plot(xplt,logprob)
ax.set_ylim(0,0.08)
for i in [1,3,7,10,20,50,200,500]:
gmm = GaussianMixture(n_components=3, covariance_type='full', max_iter=i, verbose=0, verbose_interval=10, means_init=np.array([0,4,10]).reshape(3,1))
gmm.fit(Y)
logprob = np.exp(gmm.score_samples(x1plt))
line1.set_ydata(logprob)
fig.canvas.draw()
fig.canvas.flush_events()
time.sleep(.300)
# In[8]:
gmm.means_
# In[10]:
gmm.covariances_
# In the next example we generate a set of two-dimensional samples from a GMM whose parameter values were set arbitrarily. We then use the EM algorithm to estimate the values of the model parameters from the generated points. At the end we can compare how closely the parameters found by the algorithm match the real ones.
# In[14]:
get_ipython().run_line_magic('matplotlib', 'inline')
mc = [0.4, 0.4, 0.2] # Mixing coefficients
centroids = [ array([0,0]), array([3,3]), array([0,4]) ]
ccov = [ array([[1,0.4],[0.4,1]]), diag((1,2)), diag((0.4,0.1)) ]
# Generate samples from the gaussian mixture model
x1 = np.random.multivariate_normal(centroids[0], ccov[0], 200)
x2 = np.random.multivariate_normal(centroids[1], ccov[1], 200)
x3 = np.random.multivariate_normal(centroids[2], ccov[2], 100)
X = np.concatenate((x1,x2,x3),axis=0)
fig, ax = plt.subplots(figsize=(5,5))
ax.plot(X[:,0], X[:,1], '.')
n_components = 3
#Expectation-Maximization of Mixture of Gaussians
gmm = GaussianMixture(n_components=n_components, covariance_type='full', max_iter=100, verbose=2, verbose_interval=1)
gmm.fit(X)
for i in range(n_components):
plot_ellipse(ax,gmm.means_[i,:],gmm.covariances_[i,:,:].reshape(2,2))
ax.set_xlim(-3, 7)
ax.set_ylim(-3, 7)
ax.set_xticks(np.arange(-3,8,2))
ax.set_yticks(np.arange(-3,8,2))
# Let's compare the arbitrarily chosen weights $w$ with the weights found by the algorithm
# In[29]:
print(gmm.weights_)
# The centers (means) found by the algorithm
# In[12]:
print(gmm.means_)
# And the covariance matrices found by the algorithm
# In[31]:
print(gmm.covariances_)
# Diagonal covariance
# In[32]:
#Expectation-Maximization of Mixture of Gaussians
gmm = GaussianMixture(n_components=3, covariance_type='diag', max_iter=100, verbose=2, verbose_interval=1)
gmm.fit(X)
fig, ax = plt.subplots(figsize=(5,5))
ax = plt.subplot(111)
ax.plot(X[:,0], X[:,1], '.')
for i in range(3):
plot_ellipse(ax,gmm.means_[i,:],np.diag(gmm.covariances_[i,:]).reshape(2,2))
ax.set_xlim(-3, 7)
ax.set_ylim(-3, 7)
ax.set_xticks(np.arange(-3,8,2))
ax.set_yticks(np.arange(-3,8,2))
# Spherical covariance
# In[33]:
#Expectation-Maximization of Mixture of Gaussians
gmm = GaussianMixture(n_components=3, covariance_type='spherical', max_iter=100, verbose=2, verbose_interval=1)
gmm.fit(X)
fig, ax = plt.subplots(figsize=(5,5))
ax = plt.subplot(111)
ax.plot(X[:,0], X[:,1], '.')
for i in range(3):
plot_ellipse(ax,gmm.means_[i,:],np.identity(2)* gmm.covariances_[i])
ax.set_xlim(-3, 7)
ax.set_ylim(-3, 7)
ax.set_xticks(np.arange(-3,8,2))
ax.set_yticks(np.arange(-3,8,2))
# -------------------------------------------------------------------------------------------------------------------------------
# [1] Bishop, C.M. Pattern Recognition and Machine Learning. Springer, 2006.
# In[ ]:
| UTF-8 | Python | false | false | 17,353 | py | 66 | Clase 08 - Modelos de Mezclas de Gausianas.py | 34 | 0.689761 | 0.666744 | 0 | 367 | 45.986376 | 639 |
silvanaolmedo/opencv | 12,137,577,601,222 | 6511035b08406aed3f9cd2701238fe652c698cd8 | 9f469b22e57aac3853da16b8d35637063aecabc7 | /src/filters/affine.py | 6ac8f550a5a13adc5330341d2bb2403621d9bafe | [] | no_license | https://github.com/silvanaolmedo/opencv | d019cb50ee27a75474743d94bc922254d124e93c | bb9f92e6ed8a4b0eec81376bddb96b11503db07e | refs/heads/master | 2021-01-25T07:55:25.012702 | 2017-08-24T02:06:28 | 2017-08-24T02:06:28 | 93,688,917 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
# Read the image gatito.jpg
img = cv2.imread('gatito.jpg')
# Show the original image
cv2.imshow("Original", img)
# Get the image height and width
height, width = img.shape[:2]
img_sized = cv2.resize(img, (width // 2, height // 2))
cv2.imshow("Resized", img_sized)
translation_matrix = np.float32([[1,0,70],[0,1,100]])
img_warpAff = cv2.warpAffine(img_sized, translation_matrix, (width // 2, height // 2))
cv2.imshow("Translation", img_warpAff)
cv2.waitKey(0)
cv2.destroyAllWindows() | UTF-8 | Python | false | false | 504 | py | 19 | affine.py | 19 | 0.728175 | 0.676587 | 0 | 20 | 24.25 | 80 |
DaKSK/pythonSDA | 8,873,402,450,064 | d681df4d113f8c7e277b648425a1e347365695b4 | 478da9de91189c9a0b3640e1d47f656066f525ac | /data_structures/tree_exercise.py | ba469d07ded9aa11a5db79abe9fd9ff2cbbcdc02 | [] | no_license | https://github.com/DaKSK/pythonSDA | 1055e4deca94e19512f79bc3d30c99475c2b4249 | 7a3ffe17acd738fdd11f6c7a0491dec16f506690 | refs/heads/main | 2023-07-03T19:30:27.650156 | 2021-07-31T05:57:30 | 2021-07-31T05:57:30 | 365,681,074 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class TreeNode:
def __init__(self, name, size=0, is_directory=True):
self.data = {"name": name, "size": size, "directory": is_directory}
self.parent = None
self.children = []
def __repr__(self):
return self.data["name"]
def add_child(self, child):
child.parent = self
self.children.append(child)
def get_level(self):
level = 0
seeker = self.parent
while seeker:
level += 1
seeker = seeker.parent
return level
def visualize_tree(self):
prefix = " " * self.get_level()
if self.data["directory"]:
print(f"{prefix}|__ {self} -dir")
else:
print(f"{prefix}|__ {self} -file size:{self.data['size']} bytes")
if self.children:
for child in self.children:
child.visualize_tree()
    def get_size(self):
        # A node's total size is its own size plus the sizes of all its descendants
        return self.data["size"] + sum(child.get_size() for child in self.children)
def build_tree():
# Building the tree from example5.png
# Creating the root node and it's 2 children nodes
root = TreeNode("Home")
jakub = TreeNode("jakub")
var = TreeNode("var")
root.add_child(jakub)
root.add_child(var)
# Adding children to root.children
jakub.add_child(TreeNode(".bashrc", size=50, is_directory=False))
jakub.add_child(TreeNode(".vimrc", size=100, is_directory=False))
jakub.add_child(TreeNode("blob", size=1023, is_directory=False))
# Creating the grandchild of root
log = TreeNode("log")
var.add_child(log)
# Adding a grandchild to the child of root ("var")
log.add_child(TreeNode("sys.log", size=10, is_directory=False))
return root
if __name__ == "__main__":
dirs = build_tree()
dirs.visualize_tree()
print("Home folder size is", dirs.get_size(), "bytes")
    # list.index("jakub") would compare TreeNode objects against a string and
    # never match, so look the node up by its stored name instead
    jakub_folder = next(
        child for child in dirs.children if child.data["name"] == "jakub"
    )
print("home/jakub/ size is", jakub_folder.get_size())
| UTF-8 | Python | false | false | 2,104 | py | 51 | tree_exercise.py | 46 | 0.590779 | 0.583175 | 0 | 67 | 30.373134 | 87 |
ESA-VirES/ViRES-Server | 8,169,027,824,530 | 2bdc383567a883a792b4113a9652f3dc9a320dc8 | 97b9e6f984684e8c0fc6b0c7e76caefc5079e5c8 | /vires/vires/processes/util/filters/range.py | 8a42affbb3715b3aac3491aeef4e035c0a8f8dfe | [] | no_license | https://github.com/ESA-VirES/ViRES-Server | d65cbf7b62a1457f26e7595b25466559f3b6b590 | bee4167fbed6c689d7c46c34224561a281ee6a2b | refs/heads/master | 2021-04-22T05:00:57.065728 | 2021-04-15T17:11:22 | 2021-04-15T17:11:22 | 48,378,505 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-------------------------------------------------------------------------------
#
# Data filters - scalar and vector component range filters
#
# Authors: Martin Paces <martin.paces@eox.at>
#-------------------------------------------------------------------------------
# Copyright (C) 2016 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
# pylint: disable=too-many-arguments
from logging import getLogger, LoggerAdapter
from vires.util import between
from .base import Filter
from .exceptions import FilterError
class BaseRangeFilter(Filter):
""" Base scalar value range filter. """
# pylint: disable=abstract-method
class _LoggerAdapter(LoggerAdapter):
def process(self, msg, kwargs):
return 'filter %s: %s' % (self.extra["variable"], msg), kwargs
def __init__(self, variable, vmin, vmax, logger):
self.variable = variable
self.vmin = vmin
self.vmax = vmax
self.logger = logger
@property
def label(self):
""" Get filter label. """
return self.variable
@property
def required_variables(self):
return (self.variable,)
def _filter(self, data):
""" Low level filter. """
self.logger.debug("value range: %s %s", self.vmin, self.vmax)
self.logger.debug("initial size: %d", data.shape[0])
return between(data, self.vmin, self.vmax)
def __str__(self):
return "%s:%.17g,%.17g" % (self.label, self.vmin, self.vmax)
class ScalarRangeFilter(BaseRangeFilter):
""" Simple scalar value range filter. """
def __init__(self, variable, vmin, vmax, logger=None):
BaseRangeFilter.__init__(
self, variable, vmin, vmax, self._LoggerAdapter(
logger or getLogger(__name__), {"variable": variable}
)
)
def filter(self, dataset, index=None):
data = dataset[self.variable]
if data.ndim != 1:
raise FilterError(
"An attempt to apply a scalar range filter to a non-scalar "
"variable %s!" % self.variable
)
if index is None:
index = self._filter(data).nonzero()[0]
else:
index = index[self._filter(data[index])]
self.logger.debug("filtered size: %d", index.size)
return index
class VectorComponentRangeFilter(BaseRangeFilter):
""" Single vector component range filter. """
def __init__(self, variable, component, vmin, vmax, logger=None):
BaseRangeFilter.__init__(
self, variable, vmin, vmax, self._LoggerAdapter(
logger or getLogger(__name__), {
"variable": "%s[%s]" % (variable, component)
}
)
)
self.component = component
@property
def label(self):
return "%s[%d]" % (self.variable, self.component)
def filter(self, dataset, index=None):
data = dataset[self.variable]
if data.ndim != 2:
raise FilterError(
"An attempt to apply a vector component range filter to a "
"non-vector variable %s!" % self.variable
)
if index is None:
index = self._filter(data[:, self.component]).nonzero()[0]
else:
index = index[self._filter(data[index, self.component])]
self.logger.debug("filtered size: %d", index.size)
return index
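# Illustrative usage (a sketch; the variable name and the data below are made
# up, and vires.util.between is assumed to be inclusive on both bounds):
#
#   import numpy
#   latitude_filter = ScalarRangeFilter("Latitude", -60.0, 60.0)
#   dataset = {"Latitude": numpy.array([-75.0, 10.0, 45.0, 80.0])}
#   index = latitude_filter.filter(dataset)   # expected: array([1, 2])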
| UTF-8 | Python | false | false | 4,542 | py | 21 | range.py | 14 | 0.598855 | 0.595993 | 0 | 122 | 36.229508 | 80 |
vesteves33/cursoPython | 910,533,074,187 | c44f71c8a332a0748b89fb52a0730efef8b994aa | cfa63ebda7709d3aa15a778f17fa83cf973c046c | /Exercicios/ex052.py | f0a52d35084866907942bd24afceae5b15f1a017 | [] | no_license | https://github.com/vesteves33/cursoPython | 7fef2a8cc120f6a879badf8aba2203907463519e | 9513a1ecd00193cc650df352db7c33d9c8abfaa7 | refs/heads/main | 2023-03-21T02:23:29.391563 | 2021-02-23T20:59:48 | 2021-02-23T20:59:48 | 328,348,873 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | num = int(input('Digite um número inteiro: '))
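# Count the divisors of num from 1 to num; a prime has exactly two (1 and itself)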
totalContagem = 0
for contador in range(1, num+1):
if num % contador == 0:
totalContagem += 1
if totalContagem == 2:
    print('Number {} is prime'.format(num))
else:
    print('Number {} is not prime'.format(num)) | UTF-8 | Python | false | false | 282 | py | 69 | ex052.py | 67 | 0.623188 | 0.601449 | 0 | 10 | 26.7 | 46 |
higornucci/classificacao-aulas | 3,504,693,323,584 | 401f084771e500f1881fb875133112fe2cb7ecec | 132373547f88d59cd87d8f99b81d9df1e306c1e4 | /handson/housing.py | 96514948ab81f1c108b034331367d1b67e010459 | [] | no_license | https://github.com/higornucci/classificacao-aulas | be117acd0907b044fed5aaccbf147c82c28cb39b | 1dfdc15c6ddbfa7ba5217f63d6d43c73b3b4bb0b | refs/heads/master | 2021-06-04T03:30:54.553705 | 2020-06-18T22:40:37 | 2020-06-18T22:40:37 | 110,111,682 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division, print_function, unicode_literals
import os
import tarfile
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from six.moves import urllib
from future_encoders import ColumnTransformer, OneHotEncoder
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split, cross_val_score, GridSearchCV
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import LabelEncoder, LabelBinarizer, StandardScaler, Imputer
from sklearn.ensemble import RandomForestRegressor
warnings.filterwarnings(action="ignore", message="^internal gelsd")
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 2000)
np.random.seed(42)
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room=True): # no *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self # nothing else to do
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
population_per_household = X[:, population_ix] / X[:, household_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, rooms_per_household, population_per_household,
bedrooms_per_room]
else:
return np.c_[X, rooms_per_household, population_per_household]
# class DataFrameSelector(BaseEstimator, TransformerMixin):
# def __init__(self, attribute_names):
# self.attribute_names = attribute_names
#
# def fit(self, X, y=None):
# return self
#
# def transform(self, X):
# return X[self.attribute_names].values
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "end_to_end_project"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = "datasets/housing"
HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + "/housing.tgz"
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
def income_cat_proportions(data):
return data["income_cat"].value_counts() / len(data)
def display_scores(scores):
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
housing = load_housing_data()
print(housing.head(30))
print(housing.info())
print(housing["ocean_proximity"].value_counts())
print(housing.describe())
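# Create an income category attribute so that the split below can stratify by
# income: divide the median income by 1.5 to limit the number of categories,
# round up, and merge every category above 5 into category 5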
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
print(strat_test_set["income_cat"].value_counts() / len(strat_test_set))
print(housing["income_cat"].value_counts() / len(housing))
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
compare_props = pd.DataFrame({
"Overall": income_cat_proportions(housing),
"Stratified": income_cat_proportions(strat_test_set),
"Random": income_cat_proportions(test_set),
}).sort_index()
compare_props["Rand. %error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100
print(compare_props)
for set_ in (strat_train_set, strat_test_set):
set_.drop(["income_cat"], axis=1, inplace=True)
# Visualizing the data
housing = strat_train_set.copy()
# california_img = mpimg.imread(PROJECT_ROOT_DIR + '/images/end_to_end_project/california.png')
# ax = housing.plot(kind="scatter", x="longitude", y="latitude", figsize=(10,7),
# s=housing['population']/100, label="Population",
# c="median_house_value", cmap=plt.get_cmap("jet"),
# colorbar=False, alpha=0.4,
# )
# plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=0.5,
# cmap=plt.get_cmap("jet"))
# plt.ylabel("Latitude", fontsize=14)
# plt.xlabel("Longitude", fontsize=14)
#
# prices = housing["median_house_value"]
# tick_values = np.linspace(prices.min(), prices.max(), 11)
# cbar = plt.colorbar()
# cbar.ax.set_yticklabels(["$%dk"%(round(v/1000)) for v in tick_values], fontsize=14)
# cbar.set_label('Median House Value', fontsize=16)
#
# plt.legend(fontsize=16)
# save_fig("california_housing_prices_plot")
# plt.show()
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
# attributes = ["median_house_value", "median_income", "total_rooms",
# "housing_median_age"]
# scatter_matrix(housing[attributes], figsize=(12, 8))
# save_fig("scatter_matrix_plot")
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# preparando para machine learning
housing = strat_train_set.drop("median_house_value", axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
housing.dropna(subset=["total_bedrooms"]) # remove as linhas que contêm valores nulos
housing.drop("total_bedrooms", axis=1) # remove a coluna inteira
median = housing["total_bedrooms"].median()
housing["total_bedrooms"].fillna(median) # substitui os valores nulos pela mediana
imputer = Imputer(strategy="median")
housing_num = housing.drop("ocean_proximity", axis=1) # remover atributos não numéricos
imputer.fit(housing_num) # usar sklearn para completar os valores nulos com a mediana
print(imputer.statistics_)
X = imputer.transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
encoder = LabelEncoder() # pŕoblema que os algoritmos de ml acham que categorias mais próximas são similares
housing_cat = housing["ocean_proximity"]
housing_cat_encoded = encoder.fit_transform(housing_cat)
print(housing_cat_encoded)
encoder = OneHotEncoder()
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1, 1))
print(housing_cat_1hot)
encoder = LabelBinarizer()
housing_cat_1hot = encoder.fit_transform(housing_cat)
print(housing_cat_1hot)
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
housing_extra_attribs = pd.DataFrame(
housing_extra_attribs,
columns=list(housing.columns)+["rooms_per_household", "population_per_household"])
print(housing_extra_attribs.head())
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
num_pipeline = Pipeline([
# ('selector', DataFrameSelector(num_attribs)),
('imputer', Imputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
# cat_pipeline = Pipeline([
# ('selector', DataFrameSelector(cat_attribs)),
# ('cat_encoder', OneHotEncoder()),
# ])
full_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs),
("cat", OneHotEncoder(), cat_attribs),
])
housing_prepared = full_pipeline.fit_transform(housing)
print(housing_prepared)
print(housing_prepared.shape)
# Trainando o modelo
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:\t", lin_reg.predict(some_data_prepared))
print("Labels:\t\t", list(some_labels))
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print(tree_rmse)
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring='neg_mean_squared_error', cv=10)
rmse_scores = np.sqrt(-scores)
display_scores(rmse_scores)
forest_reg = RandomForestRegressor()
forest_reg.fit(housing_prepared, housing_labels)
housing_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
print(forest_rmse)
scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring='neg_mean_squared_error', cv=10)
rmse_scores = np.sqrt(-scores)
display_scores(rmse_scores)
param_grid = [
{'n_estimators': [3, 10, 30, 40, 50], 'max_features': [2, 4, 5, 6, 7, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error')
grid_search.fit(housing_prepared, housing_labels)
print(grid_search.best_params_)
print(grid_search.best_estimator_)
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres['mean_test_score'], cvres['params']):
print(np.sqrt(-mean_score), params)
feature_importances = grid_search.best_estimator_.feature_importances_
print(feature_importances)
extra_attribs = ['rooms_per_hhold', 'pop_per_hhold', 'bedrooms_per_rooms']
cat_one_hot_attribs = list(encoder.classes_)
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances))
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value", axis=1)
Y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(Y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
display_scores(final_rmse)
| UTF-8 | Python | false | false | 11,301 | py | 86 | housing.py | 29 | 0.71359 | 0.701549 | 0 | 308 | 35.672078 | 111 |
Cornex-Inc/Coffee | 9,826,885,178,144 | d31292a1af2ea2bd80b27eb0ba0b626d36bc2742 | 7deda84f7a280f5a0ee69b98c6a6e7a2225dab24 | /Radiation/views.py | 310906cad23ae433ea4cc5a41883be3cf74c5547 | [] | no_license | https://github.com/Cornex-Inc/Coffee | 476e30f29412373fb847b2d518331e6c6b9fdbbf | fcd86f20152e2b0905f223ff0e40b1881db634cf | refs/heads/master | 2023-01-13T01:56:52.755527 | 2020-06-08T02:59:18 | 2020-06-08T02:59:18 | 240,187,025 | 0 | 0 | null | false | 2023-01-05T23:58:52 | 2020-02-13T05:47:41 | 2023-01-05T19:41:25 | 2023-01-05T23:58:51 | 66,217 | 1 | 0 | 45 | Python | false | false | from django.shortcuts import render,redirect
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
import functools
import operator
from django.db.models import Q,Case,When, CharField,Count,Sum
from .forms import *
from Laboratory.forms import *
from .models import *
# Create your views here.
@login_required
def index(request):
form = RadiationForm()
search_form=PrecedureManageForm()
error = False
if 'save' in request.POST:
form = RadiationForm(request.POST, request.FILES,)
selected_radi_manage = request.POST['selected_test_manage']
if selected_radi_manage is not '':
selected_img_id = request.POST['id']
precedure = PrecedureManager.objects.get(pk = selected_radi_manage)
if selected_img_id is '': #new
radi_manage = RadiationManage()
radi_manage.progress = 'done'
radi_manage.name_service = precedure.precedure.name
radi_manage.manager = precedure
radi_manage.save()
else:
radi_manage = RadiationManage.objects.get(pk = selected_img_id)
if form.is_valid():
form.instance.id = radi_manage.id
form.instance.manager_id= radi_manage.manager.id
form.instance.progress = 'done'
form.instance.date_ordered = radi_manage.date_ordered
form.instance.name_service = radi_manage.name_service
form.save()
return redirect('/radiation/')
else:
error = 'select patient.'
depart = Depart.objects.all()
request.POST = request.POST.copy()
if 'selected_test_manage' in request.POST:
request.POST['selected_test_manage']=''
if 'id' in request.POST:
request.POST['id']=''
if 'save' in request.POST:
request.POST['save']=''
request.FILES['image'] = None
return render(request,
'Radiation/index.html',
{
'form':form,
'search':search_form,
'error':error,
'depart':depart,
},
)
def get_image(request):
manage_id = request.POST.get('manage_id')
manage = RadiationManage.objects.get(pk = manage_id)
datas={
'id':manage.id,
}
if manage.image:
datas.update({
'id':manage.id,
'path':manage.image.url,
'remark':manage.remark,
})
return JsonResponse(datas)
def zoom_in(request,img_id):
try:
img = RadiationManage.objects.get(pk = img_id)
except RadiationManage.DoesNotExist:
return
return render(request,
'Radiation/zoomin.html',
{
'img_url':img.image.url,
},
)
def waiting_list(request):
date_start = request.POST.get('start_date')
date_end = request.POST.get('end_date')
filter = request.POST.get('filter')
input = request.POST.get('input').lower()
depart_id = request.POST.get('depart_id')
kwargs={}
date_min = datetime.datetime.combine(datetime.datetime.strptime(date_start, "%Y-%m-%d").date(), datetime.time.min)
date_max = datetime.datetime.combine(datetime.datetime.strptime(date_end, "%Y-%m-%d").date(), datetime.time.max)
argument_list = []
kwargs={}
if depart_id != '' :
kwargs['diagnosis__reception__depart_id'] = depart_id
if input !='':
argument_list.append( Q(**{'diagnosis__reception__patient__name_kor__icontains':input} ) )
argument_list.append( Q(**{'diagnosis__reception__patient__name_eng__icontains':input} ) )
argument_list.append( Q(**{'diagnosis__reception__patient__id__icontains':input} ) )
radios =PrecedureManager.objects.select_related(
'diagnosis__reception__patient'
).select_related(
'precedure'
).filter(
functools.reduce(operator.or_, argument_list),
**kwargs,
precedure__code__icontains='R',
diagnosis__recorded_date__range= (date_min,date_max),
).exclude(diagnosis__reception__progress='deleted')
else:
radios =PrecedureManager.objects.select_related(
'diagnosis__reception__patient'
).select_related(
'precedure'
).filter(
**kwargs,
precedure__code__icontains='R',
diagnosis__recorded_date__range= (date_min,date_max),
).exclude(diagnosis__reception__progress='deleted')
datas = []
for radio in radios:
data= {
'chart':radio.diagnosis.reception.patient.get_chart_no(),
'name_kor':radio.diagnosis.reception.patient.name_kor,
'name_eng':radio.diagnosis.reception.patient.name_eng,
'Depart':radio.diagnosis.reception.depart.name,
'Doctor':radio.diagnosis.reception.doctor.name_kor,
'Date_of_Birth': radio.diagnosis.reception.patient.date_of_birth.strftime('%Y-%m-%d'),
'Gender/Age':'(' + radio.diagnosis.reception.patient.get_gender_simple() +
'/' + str(radio.diagnosis.reception.patient.get_age()) + ')',
'name_service':radio.precedure.name if radio.precedure.name else radio.precedure.name_vie,
'date_ordered':'' if radio.diagnosis.reception.recorded_date is None else radio.diagnosis.reception.recorded_date.strftime('%Y-%m-%d %H:%M'),
'precedure_manage_id':radio.id,#radi_manage_id
}
check_done = RadiationManage.objects.filter(manager_id = radio.id).count()
if check_done == 0:
data.update({ 'progress':'new', })
else:
data.update({ 'progress':'done', })
datas.append(data)
context = {'datas':datas}
return JsonResponse(context)
def waiting_selected(request):
radi_manage_id = request.POST.get('radi_manage_id')
precedure = PrecedureManager.objects.get(pk = radi_manage_id)
radi_images = RadiationManage.objects.filter(manager_id = radi_manage_id)
datas = {}
for radi_image in radi_images:
date = radi_image.date_ordered.strftime('%Y-%m-%d')
if date not in datas:
datas[date] = []
data = {
'path':radi_image.image.url if radi_image.image else '',
'id':radi_image.id,
'service':radi_image.name_service,
'remark':radi_image.remark,
}
datas[date].append(data)
context = {
'datas':datas,
'chart':precedure.diagnosis.reception.patient.get_chart_no(),
'Name':precedure.diagnosis.reception.patient.name_kor + ' ' + precedure.diagnosis.reception.patient.name_eng,
'Date_of_birth':precedure.diagnosis.reception.patient.date_of_birth.strftime('%Y-%m-%d') +
'(' + precedure.diagnosis.reception.patient.get_gender_simple() +
'/' + str(precedure.diagnosis.reception.patient.get_age()) + ')',};
context.update({
'Lab':precedure.precedure.name if precedure.precedure.name else precedure.precedure.name_vie,
'date_ordered':'' if precedure.diagnosis.reception.recorded_date is None else precedure.diagnosis.reception.recorded_date.strftime('%Y-%m-%d %H:%M') ,
'Depart':precedure.diagnosis.reception.depart.name + ' ( ' + precedure.diagnosis.reception.doctor.name_kor + ' )',
})
return JsonResponse(context)
def delete_image(request):
image_id = request.POST.get('image_id')
RadiationManage.objects.get(pk = image_id).delete()
res = 'success'
return JsonResponse({'result':res}) | UTF-8 | Python | false | false | 7,919 | py | 415 | views.py | 267 | 0.583533 | 0.583407 | 0 | 216 | 35.666667 | 158 |
project-rig/pynn_spinnaker | 11,544,872,097,212 | 42d35ba0d557f1d059562fe3e3bbc8bf14c37341 | c2dec05f3eb894e616acadd4e3c30ac697fd2656 | /pynn_spinnaker/spinnaker/regions/__init__.py | f2b56ef349325633d6e113e4842b934c63cc046d | [] | no_license | https://github.com/project-rig/pynn_spinnaker | da1ee4c52dbf0015cec0a5f2676f4e031032848d | 89e9bdba78157804f491948bd3d630101d7b9cb6 | refs/heads/master | 2020-04-09T16:52:06.658914 | 2016-12-19T14:14:37 | 2016-12-19T14:14:37 | 31,414,869 | 0 | 2 | null | false | 2016-12-19T14:12:17 | 2015-02-27T10:39:38 | 2016-10-28T09:48:17 | 2016-12-19T14:12:16 | 1,824 | 0 | 3 | 19 | Python | null | null | from analogue_recording import AnalogueRecording
from connection_builder import ConnectionBuilder
from delay_buffer import DelayBuffer
from extended_plastic_synaptic_matrix import ExtendedPlasticSynapticMatrix
from flush import Flush
from homogeneous_parameter_space import HomogeneousParameterSpace
from input_buffer import InputBuffer
from key_lookup_binary_search import KeyLookupBinarySearch
from neuron import Neuron
from output_buffer import OutputBuffer
from output_weight import OutputWeight
from parameter_space import ParameterSpace
from plastic_synaptic_matrix import PlasticSynapticMatrix
from sdram_back_prop_input import SDRAMBackPropInput
from sdram_back_prop_output import SDRAMBackPropOutput
from spike_recording import SpikeRecording
from spike_source_array import SpikeSourceArray
from spike_source_poisson import SpikeSourcePoisson
from static_synaptic_matrix import StaticSynapticMatrix
from synaptic_matrix import SynapticMatrix
| UTF-8 | Python | false | false | 951 | py | 112 | __init__.py | 105 | 0.883281 | 0.883281 | 0 | 20 | 46.55 | 74 |
lse13/Question2- | 14,199,161,895,800 | fa9693f1bc1478134a4cedd9d5c2e7979b6d3bba | d644e45fcf3b1e9ab1afa71a0f2e657ee77a755c | /ex1.py | 5d1cbae1b2c29255c20598dc720c472bb0eefffc | [] | no_license | https://github.com/lse13/Question2- | f3b30b2d786f4c1c86e55bb0702605469bd32e00 | a38484308babd5894a147d898db339b5a80f3553 | refs/heads/master | 2020-08-30T04:40:39.029858 | 2019-10-29T11:03:24 | 2019-10-29T11:03:24 | 218,266,429 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Exercise 1: Work with the person next to you to design
# classes to manage the products, customers, and purchase
# orders for an online book store such as amazon.com.
# Outline the data attributes and useful methods for
# each class. You can discuss and create the outline together.
class Amazon_orders(object):
def __init__(self):
self.products=None
self.customers=None
def add_products(self,product_list):
self.product.append(product_list)
def get_products(self):
return self.products
def get_customers(self):
return self.customers
def set_customers(self, customers_list):
self.customers = customers_list
| UTF-8 | Python | false | false | 666 | py | 1 | ex1.py | 1 | 0.717718 | 0.716216 | 0 | 23 | 27.826087 | 63 |
suritechiez03/IMS_Python | 1,786,706,420,686 | f4c2434ccc80a8fdd6b16a7312631031aa8f3045 | 40fd858c4a7b08b02e1030fe33f9e6541d67b8c6 | /ordermanagement/urls.py | 98160b1b3b1d39468db35d0e9ad2ebed4d23e7db | [] | no_license | https://github.com/suritechiez03/IMS_Python | 366ac1ae6a5309ebc2a7c2686a16320235dfdd99 | 1280cd49f8a1d8b3a1a815ddf53f2f18ea10b0a9 | refs/heads/master | 2021-05-10T08:42:02.362882 | 2018-09-10T09:28:41 | 2018-09-10T09:28:41 | 118,899,316 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^api/save_order$',views.save_order, name="save_order"),
url(r'^api/get_order_list$', views.get_order_list, name="get_order_list"),
url(r'^api/get_order_details/(?P<order_number>[0-9]+)/$', views.get_order_list_by_number, name="get_order_details"),
url(r'^api/remove_product', views.remove_product, name="remove_product"),
url(r'^api/get_order_by_customer$', views.get_order_by_customer, name="get_order_by_customer"),
] | UTF-8 | Python | false | false | 560 | py | 84 | urls.py | 62 | 0.671429 | 0.667857 | 0 | 13 | 42.153846 | 120 |
bopopescu/dbfinal | 9,191,230,052,159 | 19b8672e96c94215a52120366308b079c3a01ba0 | 4d063e7bec6226c34cab2233d6e3e84d146ffaa3 | /gtmd/models/Order.py | 3e832a7839353edc748ff9aa8f4ef50804a10114 | [] | no_license | https://github.com/bopopescu/dbfinal | 9e3b4fd405fb6ec5b2b99d5151693b226d0799d8 | 02b17b3f5e492d6392077cddc2872f574c3fa833 | refs/heads/master | 2022-11-19T23:15:42.912480 | 2019-12-31T15:46:10 | 2019-12-31T15:46:10 | 281,665,426 | 0 | 0 | null | true | 2020-07-22T12:00:53 | 2020-07-22T12:00:52 | 2019-12-31T15:46:13 | 2019-12-31T15:46:11 | 41,294 | 0 | 0 | 0 | null | false | false | from gtmd.app import db
import datetime
class Order(db.Model):
# 订单id
order_id = db.Column(db.String, primary_key=True, index=True, nullable=False)
# 用户id
buyer_id = db.Column(db.String, index=True, nullable=False)
# 商店id
store_id = db.Column(db.String, index=True, nullable=False)
# 创建时间
createtime = db.Column(db.DATETIME, default=datetime.datetime.now, nullable=False)
# 订单状态
status = db.Column(db.String, nullable=False)
orderdetail = db.relationship("Orderdetail", backref="orderdetail")
| UTF-8 | Python | false | false | 568 | py | 33 | Order.py | 24 | 0.694444 | 0.694444 | 0 | 17 | 30.764706 | 86 |
freelawproject/juriscraper | 11,501,922,452,240 | dcd2dc30acb601dfd4f58df80a071fe2220609be | 362196f32e8248e025cb2f6cf0b88f812c9a059c | /juriscraper/opinions/united_states/federal_appellate/ca9_u.py | 9b65804cd9453cb372e89d3694e4c87ee25a7184 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | https://github.com/freelawproject/juriscraper | 0fea8d4bb512808cb1e036aaaf819e9cc0847a6b | d2c6672696e13e33ec9981a1901b87047d8108c5 | refs/heads/main | 2023-08-09T13:27:21.357915 | 2023-07-06T22:33:01 | 2023-07-06T22:33:01 | 22,757,589 | 283 | 97 | BSD-2-Clause | false | 2023-09-08T22:59:36 | 2014-08-08T12:50:35 | 2023-09-08T16:49:38 | 2023-09-08T22:59:36 | 53,996 | 287 | 81 | 87 | HTML | false | false | """
History:
- 2014-08-05: Updated by mlr because it was not working, however, in middle
of update, site appeared to change. At first there were about five
columns in the table and scraper was failing. Soon, there were seven and
the scraper started working without my fixing it. Very odd.
- 2023-01-13: Update to use RSS Feed
"""
from juriscraper.opinions.united_states.federal_appellate import ca9_p
class Site(ca9_p.Site):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = "https://www.ca9.uscourts.gov/memoranda/index.xml"
self.status = "Unpublished"
| UTF-8 | Python | false | false | 683 | py | 4,602 | ca9_u.py | 327 | 0.666179 | 0.63836 | 0 | 18 | 36.944444 | 79 |
kirina001/musicPytest | 7,602,092,136,731 | 77b731d71920ac37a473e39e9f22348aa67349c6 | 2177cad4601dff84ce9e9f6ccd59051a5c4fc5dd | /test/test01app.py | 9fc9dadf03c6464f57e48bd8df8db5c5dc41687b | [] | no_license | https://github.com/kirina001/musicPytest | 4eaaae63547fe7e48d5040f3202bb6a2ab2cd6ce | cb3e488be7fa7b7036a169029eb095866773eb48 | refs/heads/master | 2020-07-12T17:09:04.260323 | 2019-08-28T07:32:55 | 2019-08-28T07:32:55 | 204,869,970 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: UTF-8 -*-
conf = {
"platformName": "Android",
"platformVersion": "6.0.1",
"deviceName": "127.0.0.1:7555",
"appPackage": "com.FreshAir",
"appActivity": ".activity.WelcomeActivity",
"noReset": "true",
"unicodeKeyboard": "true",
"resetKeyboard": "true"
}
c = eval(str(conf))
print(type(c))
| UTF-8 | Python | false | false | 335 | py | 26 | test01app.py | 24 | 0.58209 | 0.540299 | 0 | 15 | 21.333333 | 47 |
Davies-Sam/Genetris | 4,569,845,230,733 | 6ce3b8140178ab76be6cdafbf7200460f95f2bc3 | 38fb7643782351fa9fab631aac6037ba63f82a59 | /tetris.py | f53c7e9677b174128d6ab8ec8ff61e8f1da5bd47 | [
"MIT"
] | permissive | https://github.com/Davies-Sam/Genetris | c2f2589414fc08e560d5ce3fafb7d445fb3b0203 | 1d46d2d975baffd7c08933170ed0cc744b8e00a2 | refs/heads/master | 2021-06-22T03:05:22.816896 | 2021-05-27T23:56:51 | 2021-05-27T23:56:51 | 222,779,924 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import pygame, sys
from copy import deepcopy
import numpy
CELL_SIZE = 30
COLS = 10
ROWS = 22
MAXFPS = 30
PIECELIMIT = float("inf")
DROP_TIME = 60
DRAW = True
tetris_shapes = [
[[1, 1, 1],
[0, 1, 0]],
[[0, 2, 2],
[2, 2, 0]],
[[3, 3, 0],
[0, 3, 3]],
[[4, 0, 0],
[4, 4, 4]],
[[0, 0, 5],
[5, 5, 5]],
[[6, 6, 6, 6]],
[[7, 7],
[7, 7]]
]
colors = [
(0, 0, 0 ),
(255, 85, 85),
(100, 200, 115),
(120, 108, 245),
(255, 140, 50 ),
(50, 120, 52 ),
(146, 202, 73 ),
(150, 161, 218 ),
(35, 35, 35) # Helper color for background grid
]
#rotates the pieces clockwise
def rotate_clockwise(shape):
return [ [ shape[y][x] for y in range(len(shape)) ] for x in range(len(shape[0]) - 1, -1, -1) ]
#checks that no pieces are overlapping
def check_collision(board, shape, offset):
off_x, off_y = offset
for cy, row in enumerate(shape):
for cx, cell in enumerate(row):
try:
if cell and board[ cy + off_y ][ cx + off_x ]:
return True
except IndexError:
return True
return False
#removes a row from the board
def remove_row(board, row):
del board[row]
return [[0 for i in range(COLS)]] + board
#adds a placed piece to the board
def join_matrixes(mat1, mat2, mat2_off):
mat3 = deepcopy(mat1)
off_x, off_y = mat2_off
for cy, row in enumerate(mat2):
for cx, val in enumerate(row):
mat3[cy+off_y-1][cx+off_x] += val
return mat3
#create the board
def new_board():
board = [ [ 0 for x in range(COLS) ] for y in range(ROWS) ]
#next line not needed, just there for clarity (adds a base row to the grid)
board += [[ 1 for x in range(COLS)]]
return board
class TetrisApp(object):
def __init__(self, genetics):
self.DROPEVENT = pygame.USEREVENT + 1
pygame.init()
pygame.display.set_caption("Final Project")
pygame.key.set_repeat(250,25)
self.width = CELL_SIZE * (COLS+10)
self.height = CELL_SIZE * ROWS
self.rlim = CELL_SIZE * COLS
self.bground_grid = [[ 8 if x%2==y%2 else 0 for x in range(COLS)] for y in range(ROWS)]
self.default_font = pygame.font.Font(pygame.font.get_default_font(), 11)
if DRAW:
self.screen = pygame.display.set_mode((self.width, self.height))
self.next_stone = tetris_shapes[5]
self.linesCleared = 0
self.gameover = False
self.genetics = genetics
self.ai = None
self.limit = PIECELIMIT
self.piecesPlayed = 0
if self.genetics.sequenceType == "fixed":
self.init_game(self.genetics.seed)
elif self.genetics.sequenceType == "random":
self.init_game(numpy.random.random())
def new_stone(self):
self.stone = self.next_stone
nextStone = random.randint(0, len(tetris_shapes)-1)
self.next_stone = tetris_shapes[nextStone]
self.stone_x = COLS//2 - len(self.stone[0])//2
self.stone_y = 0
self.score += 1
self.piecesPlayed += 1
if check_collision(self.board, self.stone, (self.stone_x, self.stone_y)):
self.gameover = True
if self.genetics:
#print(self.linesCleared)
self.genetics.GameOver(self.linesCleared)
def init_game(self,seed):
random.seed(seed)
self.board = new_board()
self.score = 0
self.linesCleared = 0
#start every game with a flat piece
self.next_stone = tetris_shapes[6]
self.new_stone()
pygame.time.set_timer(self.DROPEVENT, DROP_TIME)
def disp_msg(self, msg, topleft):
x,y = topleft
for line in msg.splitlines():
self.screen.blit(self.default_font.render(line, False, (255,255,255), (0,0,0)), (x,y))
y+=14
def center_msg(self, msg):
for i, line in enumerate(msg.splitlines()):
msg_image = self.default_font.render(line, False,
(255,255,255), (0,0,0))
msgim_center_x, msgim_center_y = msg_image.get_size()
msgim_center_x //= 2
msgim_center_y //= 2
self.screen.blit(msg_image, (
self.width // 2-msgim_center_x,
self.height // 2-msgim_center_y+i*22))
def draw_matrix(self, matrix, offset):
off_x, off_y = offset
for y, row in enumerate(matrix):
for x, val in enumerate(row):
if val:
#corrupt board exception from https://tinyurl.com/wu7gl48
try:
pygame.draw.rect(self.screen, colors[val],
pygame.Rect((off_x+x)*CELL_SIZE, (off_y+y)*CELL_SIZE, CELL_SIZE, CELL_SIZE), 0)
except IndexError:
pass
#print("Corrupted board")
#self.print_board()
def add_cl_lines(self, n):
linescores = [0, 40, 100, 300, 1200]
self.score += linescores[n]
self.linesCleared += n
def move_to(self, x):
self.move(x - self.stone_x)
def move(self, delta_x):
if not self.gameover:
new_x = self.stone_x + delta_x
if new_x < 0:
new_x = 0
if new_x > COLS - len(self.stone[0]):
new_x = COLS - len(self.stone[0])
if not check_collision(self.board, self.stone, (new_x, self.stone_y)):
self.stone_x = new_x
def drop(self):
if not self.gameover:
self.stone_y += 1
if check_collision(self.board, self.stone, (self.stone_x, self.stone_y)):
self.board = join_matrixes(self.board, self.stone, (self.stone_x, self.stone_y))
self.new_stone()
cleared_rows = 0
for i, row in enumerate(self.board[:-1]):
if 0 not in row:
self.board = remove_row(self.board, i)
cleared_rows += 1
self.add_cl_lines(cleared_rows)
if self.ai:
self.ai.update_board()
return True
return False
def insta_drop(self):
if not self.gameover:
while not self.drop():
pass
def rotate_stone(self):
if not self.gameover:
new_stone = rotate_clockwise(self.stone)
if not check_collision(self.board, new_stone, (self.stone_x, self.stone_y)):
self.stone = new_stone
def start_game(self,seed):
if self.gameover:
self.init_game(seed)
self.gameover = False
def quit(self):
self.center_msg("exiting...")
pygame.display.update()
""" make sure fitnesses are recorded
for a in self.genetics.population:
print(a.fitness)
print("\n")
"""
sys.exit()
def ai_toggle_instantPlay(self):
if self.ai:
self.ai.instantPlay = not self.ai.instantPlay
def print_board(self):
i=0
for row in self.board:
print(self.board[i])
print('\n')
i+=1
"""for testing
import heuristics
print("height %s" % heuristics.TotalHeight(self.board))
print("bump %s" % heuristics.Bumpiness(self.board))
print("holes %s" % heuristics.HolesCreated(self.board))
print("linesc %s" % heuristics.LinesCleared(self.board))
print("connectedholes %s" % heuristics.ConnectedHoles(self.board))
print("blockade %s" % heuristics.Blockades(self.board))
print("altDelta %s" % heuristics.AltitudeDelta(self.board))
print("WeighteBlocks %s" % heuristics.WeightedBlocks(self.board))
print("Horiz R %s" % heuristics.HorizontalRoughness(self.board))
print("Vert R %s" % heuristics.VerticalRoughness(self.board))
print("wells %s" % heuristics.Wells(self.board))
print("max well %s" % heuristics.MaxWell(self.board))
"""
def run(self):
key_actions = {
'ESCAPE': self.quit,
'LEFT': lambda: self.move(-1),
'RIGHT': lambda: self.move(+1),
'DOWN': self.drop,
'UP': self.rotate_stone,
'RETURN': self.insta_drop,
'p': self.ai_toggle_instantPlay,
't' : self.print_board
}
clock = pygame.time.Clock()
while True:
if DRAW:
self.screen.fill((0,0,0))
if self.gameover:
self.center_msg("Game Over!\nYour score: %d\nPress space to continue" % self.score)
else:
pygame.draw.line(self.screen, (255,255,255),
(self.rlim+1, 0), (self.rlim+1, self.height-1))
self.disp_msg("Next:", (self.rlim+CELL_SIZE, 2))
self.disp_msg("Score: %d" % self.score, (self.rlim+CELL_SIZE, CELL_SIZE*5))
if self.ai and self.genetics:
chromosome = self.genetics.population[self.genetics.current_organism]
self.disp_msg("Generation: %s" % self.genetics.current_generation, (self.rlim+CELL_SIZE, CELL_SIZE*5))
self.disp_msg("\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n %s: %s\n" % (
"Organism #", self.genetics.current_organism,
"Name", chromosome.name,
"Played", chromosome.played,
"Fitness", chromosome.fitness,
"Age", chromosome.age,
"Height", chromosome.heuristics[0],
"Bumpiness", chromosome.heuristics[1],
"Holes", chromosome.heuristics[2],
"Lines", chromosome.heuristics[3],
"Connected Holes", chromosome.heuristics[4],
"Blockades", chromosome.heuristics[5],
"Altitude Delta", chromosome.heuristics[6],
"Weighted Blocks", chromosome.heuristics[7],
"Horizonal Roughness", chromosome.heuristics[8],
"Vertical Roughness", chromosome.heuristics[9],
"Wells", chromosome.heuristics[10],
"Biggest Well", chromosome.heuristics[11],
"Lines Cleared", self.linesCleared
), (self.rlim+CELL_SIZE, CELL_SIZE*7))
self.draw_matrix(self.bground_grid, (0,0))
self.draw_matrix(self.board, (0,0))
self.draw_matrix(self.stone, (self.stone_x, self.stone_y))
self.draw_matrix(self.next_stone, (COLS+1,2))
pygame.display.update()
for event in pygame.event.get():
if event.type == self.DROPEVENT:
self.drop()
elif event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
for key in key_actions:
if event.key == eval("pygame.K_" + key):
key_actions[key]()
if self.piecesPlayed > PIECELIMIT:
self.gameover = True
if self.genetics:
#print(self.linesCleared)
self.genetics.GameOver(self.linesCleared)
clock.tick(145)
if __name__ == "__main__":
from agent import Agent
app = TetrisApp()
app.ai = Agent(app)
app.ai.instantPlay = True
app.run()
| UTF-8 | Python | false | false | 9,665 | py | 6 | tetris.py | 4 | 0.636213 | 0.608898 | 0 | 332 | 28.111446 | 201 |
mydear33000/Person-Reid | 19,610,820,701,955 | 3b88142fba6095908d6bac6f98dea0143537c6c8 | a82fe21d1027b1a7aa9647af63e76bc80f2f575c | /scripts/attrconf.py | b449fbcb15341bf26b5fa2c668e948a8d512cc77 | [
"MIT"
] | permissive | https://github.com/mydear33000/Person-Reid | 4b99ae3b39aacee4361176ea6d36ac100e41c5a6 | 0aad210d370737ec8654972d509ad848b22f6ee6 | refs/heads/master | 2017-05-12T08:29:37.359631 | 2014-03-07T15:54:11 | 2014-03-07T15:54:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# datasets = ['CUHK', 'CUHKL', 'CUHKD', 'GRID', 'PRID', 'VIPeR', '3DPeS', 'SARC3D']
datasets = ['3DPeS', 'SARC3D', 'CUHK_01', 'CUHK_02', 'CUHK_03', 'CUHK_04', 'CUHK_05', 'CUHK_07']
names = [
'genderFemale',
'genderMale',
'ageChild',
'ageYouth',
'ageMiddle',
'ageElder',
'raceAsian',
'raceBlack',
'raceWhite',
'accessoryCap',
'accessoryFaceMask',
'accessoryGlasses',
'accessoryHairBand',
'accessoryHat',
'accessoryHeadphone',
'accessoryKerchief',
'accessoryMuffler',
'accessorySunglasses',
'accessoryTie',
'accessoryOther',
'accessoryNothing',
'carryingBackpack',
'carryingHandbag',
'carryingLuggageCase',
'carryingOutwear',
'carryingShoppingBag',
'carryingShoulderBag',
'carryingUmbrella',
'carryingOther',
'carryingNothing',
'upperBodyBlack',
'upperBodyBlue',
'upperBodyBrown',
'upperBodyGreen',
'upperBodyGrey',
'upperBodyOrange',
'upperBodyPink',
'upperBodyPurple',
'upperBodyRed',
'upperBodyWhite',
'upperBodyYellow',
'upperBodyOtherColor',
'upperBodyCoat',
'upperBodyDownCoat',
'upperBodyDress',
'upperBodyJacket',
'upperBodyShirt',
'upperBodySweater',
'upperBodySuit',
'upperBodyTshirt',
'upperBodyOtherStyle',
'upperBodyNoSleeve',
'upperBodyShortSleeve',
'upperBodyLongSleeve',
'upperBodyLogo',
'upperBodyPlaid',
'upperBodyHStripe',
'upperBodyVStripe',
'upperBodyOtherTexture',
'upperBodyNoTexture',
'lowerBodyBlack',
'lowerBodyBlue',
'lowerBodyBrown',
'lowerBodyGreen',
'lowerBodyGrey',
'lowerBodyOrange',
'lowerBodyPink',
'lowerBodyPurple',
'lowerBodyRed',
'lowerBodyWhite',
'lowerBodyYellow',
'lowerBodyOtherColor',
'lowerBodyJeans',
'lowerBodyPants',
'lowerBodySports',
'lowerBodySkirt',
'lowerBodyStockings',
'lowerBodySuit',
'lowerBodyOtherStyle',
'lowerBodyShort',
'lowerBodyCapri',
'lowerBodyLong',
'lowerBodyLogo',
'lowerBodyPlaid',
'lowerBodyHStripe',
'lowerBodyVStripe',
'lowerBodyOtherTexture',
'lowerBodyNoTexture',
'hairBlack',
'hairBlue',
'hairBrown',
'hairGreen',
'hairGrey',
'hairOrange',
'hairPink',
'hairPurple',
'hairRed',
'hairWhite',
'hairYellow',
'hairOtherColor',
'hairBald',
'hairBrushCut',
'hairMidLength',
'hairLong'
]
unival = [
['genderFemale', 'genderMale'],
['ageChild', 'ageYouth', 'ageMiddle', 'ageElder'],
['raceAsian', 'raceBlack', 'raceWhite'],
['upperBodyNoSleeve', 'upperBodyShortSleeve', 'upperBodyLongSleeve'],
['upperBodyLogo', 'upperBodyPlaid', 'upperBodyHStripe', 'upperBodyVStripe', 'upperBodyOtherTexture', 'upperBodyNoTexture'],
['lowerBodyBlack', 'lowerBodyBlue', 'lowerBodyBrown', 'lowerBodyGreen', 'lowerBodyGrey', 'lowerBodyOrange', 'lowerBodyPink', 'lowerBodyPurple', 'lowerBodyRed', 'lowerBodyWhite', 'lowerBodyYellow', 'lowerBodyOtherColor'],
['lowerBodyJeans', 'lowerBodyPants', 'lowerBodySports', 'lowerBodySkirt', 'lowerBodyStockings', 'lowerBodySuit', 'lowerBodyOtherStyle'],
['lowerBodyShort', 'lowerBodyCapri', 'lowerBodyLong'],
['lowerBodyLogo', 'lowerBodyPlaid', 'lowerBodyHStripe', 'lowerBodyVStripe', 'lowerBodyOtherTexture', 'lowerBodyNoTexture'],
['hairBlack', 'hairBlue', 'hairBrown', 'hairGreen', 'hairGrey', 'hairOrange', 'hairPink', 'hairPurple', 'hairRed', 'hairWhite', 'hairYellow', 'hairOtherColor'],
['hairBald', 'hairBrushCut', 'hairMidLength', 'hairLong']
]
unival_titles = [
'Gender',
'Age',
'Race',
'Upper Body Sleeve',
'Upper Body Texture',
'Lower Body Color',
'Lower Body Style',
'Lower Body Length',
'Lower Body Texture',
'Hair Color',
'Hair Style'
]
multival = [
['accessoryCap', 'accessoryFaceMask', 'accessoryGlasses', 'accessoryHairBand', 'accessoryHat', 'accessoryHeadphone', 'accessoryKerchief', 'accessoryMuffler', 'accessorySunglasses', 'accessoryTie', 'accessoryOther', 'accessoryNothing'],
['carryingBackpack', 'carryingHandbag', 'carryingLuggageCase', 'carryingOutwear', 'carryingShoppingBag', 'carryingShoulderBag', 'carryingUmbrella', 'carryingOther', 'carryingNothing'],
['upperBodyBlack', 'upperBodyBlue', 'upperBodyBrown', 'upperBodyGreen', 'upperBodyGrey', 'upperBodyOrange', 'upperBodyPink', 'upperBodyPurple', 'upperBodyRed', 'upperBodyWhite', 'upperBodyYellow', 'upperBodyOtherColor'],
['upperBodyCoat', 'upperBodyDownCoat', 'upperBodyDress', 'upperBodyJacket', 'upperBodyShirt', 'upperBodySweater', 'upperBodySuit', 'upperBodyTshirt', 'upperBodyOtherStyle']
]
multival_titles = [
'Accessories',
'Carryings',
'Upper Body Colors',
'Upper Body Styles'
]
| UTF-8 | Python | false | false | 4,844 | py | 54 | attrconf.py | 35 | 0.666804 | 0.663088 | 0 | 154 | 30.454545 | 239 |
demo112/1807 | 14,817,637,192,745 | 065ae040b22eb5404aaaa28adaba45cca1642008 | bf397e60bba27b649084966aee686869c7df595d | /PythonNet/day09/day9/thread_server.py | 149f4b89af80e3e479a6378481bf0c2461dffce5 | [] | no_license | https://github.com/demo112/1807 | 3783e37f7dab3945a3fc857ff8f77f4690012fbe | 9b921c90b3003226d919017d521a32da47e546ad | refs/heads/master | 2022-12-01T10:50:24.086828 | 2018-12-06T09:48:14 | 2018-12-06T09:48:14 | 150,758,323 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from socket import *
import os,sys
from threading import *
HOST = "0.0.0.0"
PORT = 8888
ADDR = (HOST,PORT)
#客户端处理函数
def handler(connfd):
print("Connect from",connfd.getpeername())
while True:
data = connfd.recv(1024).decode()
if not data:
break
print(data)
connfd.send(b'Receive your msg')
connfd.close()
s = socket()
s.bind(ADDR)
s.listen(5)
while True:
try:
connfd,addr = s.accept()
except KeyboardInterrupt:
s.close()
sys.exit("服务器退出")
except Exception as e:
print(e)
continue
t = Thread(target=handler,args= (connfd,))
t.setDaemon(True)
t.start()
| UTF-8 | Python | false | false | 705 | py | 350 | thread_server.py | 263 | 0.584435 | 0.565345 | 0 | 36 | 17.888889 | 46 |
Phoenicians-2020/barter-2020 | 4,569,845,249,594 | 98a1e24f37e5548c6962297bfa6aff1c46da7d86 | 7ae1f55ad577831316d27caa6c9dd7d99521d538 | /users/admin.py | fa0719033df8d39538bf2fbf0371c3e7ab99fcd2 | [] | no_license | https://github.com/Phoenicians-2020/barter-2020 | 51ca701aedebe0ec11d30edbea9b197c25d75db3 | efd235ba2f3cf3ad7fdee85f230a1c3f530aa264 | refs/heads/master | 2022-11-25T06:07:11.178513 | 2020-07-30T06:34:47 | 2020-07-30T06:34:47 | 281,867,116 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from django.contrib.auth import get_user_model
from users.models import (
Profile,
Interests,
User
)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ["id", "user", "gender", ]
search_fields = ["user__name", "user__email"]
@admin.register(Interests)
class InterestsAdmin(admin.ModelAdmin):
list_display = ["id", "name", "date_created", "date_updated"]
search_fields = ["name"]
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = ["id", "username", "name", "is_superuser"]
readonly_fields = ["password"]
search_fields = ["name"]
| UTF-8 | Python | false | false | 657 | py | 23 | admin.py | 19 | 0.674277 | 0.674277 | 0 | 27 | 23.333333 | 65 |
kanatnadyrbekov/Ch1Part2-Task-17 | 17,910,013,628,071 | cd98b77f7b44f64495f7221e320b8f7967830e9b | 1012238136c7fd2e2ed5e0f1271ce93b8576279b | /task17.py | 42d818cecfa2dbb3341d461a5314c373249705cd | [] | no_license | https://github.com/kanatnadyrbekov/Ch1Part2-Task-17 | 6f380aa5024e82f4e71c2554b744d9bec1427ff6 | 18c398cd31a894db3b7aa7244bc14c27e790168b | refs/heads/master | 2020-12-02T01:49:45.815116 | 2019-12-30T04:40:44 | 2019-12-30T04:40:44 | 230,848,490 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Write the code which will write excepted data to files below
# For example given offices of Google:
# 1) google_kazakstan.txt
# 2) google_paris.txt
# 3)google_uar.txt
# 4)google_kyrgystan.txt
# 5)google_san_francisco.txt
# 6)google_germany.txt
# 7)google_moscow.txt
# 8)google_sweden.txt
# When the user will say “Hello”
# Your code must communicate and give a choice from listed offices. After it
# has to receive a complain from user, and write it to file chosen by user.
# Hint: Use construction “with open”
def complains():
google_branches = {1: 'google_kazakhstan.txt',
2: 'google_paris.txt',
3: 'google_kyrgyzstan.txt',
4: 'google_san_francisco.txt',
5: 'google_germany.txt',
6: 'google_moscow.txt',
7: 'google_sweden.txt'
}
print("Enter a number: ")
for key, value in google_branches.items():
office = value.replace('_', ' ').title()
print(f"{key}:{office.replace('.Txt','')}")
user_choice = int(input("Enter branch num:"))
try:
office = google_branches[user_choice]
user_text = input("Enter your text:")
with open(office, 'w') as the_file:
the_file.write(user_text)
print("Thanks for feedback")
except KeyError:
print("Choose from the list above")
complains()
complains() | UTF-8 | Python | false | false | 1,457 | py | 1 | task17.py | 1 | 0.586611 | 0.576259 | 0 | 45 | 31.222222 | 76 |
LopesAbigail/intro-ciencia-computacao | 3,788,161,207,111 | f53a6ab7c6ae22246ab69f7b9d1faf2a941ef065 | 8e7f63c9c4f9da6bdd2ed23a38e912400c8e4097 | /Tests/raizQuadrada.py | 1542303574f2d79e5f16301e3348c2ece860208e | [] | no_license | https://github.com/LopesAbigail/intro-ciencia-computacao | 3e339c6778734bded8c3830a4d6100b1110887a4 | ca85640ee5c415bdb4b86af64189b5761b4d9c30 | refs/heads/main | 2023-03-16T09:39:12.626005 | 2021-03-10T14:27:26 | 2021-03-10T14:27:26 | 345,622,914 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
a = int(input("Insira um valor para A: "))
b = int(input("Insira um valor para B: "))
c = int(input("Insira um valor para C: "))
delta = (b**2)-4*a*c
if (delta >= 0):
x1 = (-b + math.sqrt(delta)) / (2*a)
x2 = (-b - math.sqrt(delta)) / (2*a)
if (delta == 0):
print("A equação possui apenas uma raiz real, que é",x1,"\nDelta =",delta)
else:
print("A equação possui duas raízes reais, que são:",x1,"e",x2,"\nDelta =",delta)
else:
print("A equação não possui raízes reais.\nDelta =",delta)
| UTF-8 | Python | false | false | 548 | py | 67 | raizQuadrada.py | 66 | 0.58473 | 0.564246 | 0 | 17 | 30.588235 | 89 |
affinitic/fbk.policy | 15,367,393,025,151 | 5d5da09eac746454d64be0bea33a1b2466b6a0f3 | de645aaf06af4cc87e10e599434a306555e055d6 | /src/fbk/policy/content/members.py | abecd5f1c37cceffcd27ff9879607e49ed513be0 | [] | no_license | https://github.com/affinitic/fbk.policy | 205c6bc34e5ff1a7940eaacf0fa8a96015cfaa69 | 8d3d9a9b4b367d2afc063b5cac1fe318148161ae | refs/heads/master | 2016-09-15T21:18:15.113156 | 2016-03-15T20:23:56 | 2016-03-15T20:23:56 | 39,332,229 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
fbk.policy
----------
Created by mpeeters
:copyright: (c) 2015 by Affinitic SPRL
:license: GPL, see LICENCE.txt for more details.
"""
from collective.contact.core.content.directory import Directory
from collective.contact.core.content.directory import IDirectory
from five import grok
from plone.autoform import directives as form
from plone.dexterity.schema import DexteritySchemaPolicy
from zope.interface import implements
class IMembers(IDirectory):
form.omitted('position_types', 'organization_types', 'organization_levels')
class Members(Directory):
implements(IMembers)
class MembersSchemaPolicy(grok.GlobalUtility, DexteritySchemaPolicy):
grok.name('members_schema_policy')
def bases(self, schema_name, tree):
return (IMembers, )
| UTF-8 | Python | false | false | 799 | py | 83 | members.py | 46 | 0.758448 | 0.75219 | 0 | 31 | 24.774194 | 79 |
Amos-x/Operation | 1,133,871,392,966 | debb9f0cf6b6bd7838a6c7498f95ce09da8528a2 | 96c24d4d8b620104ac7ea4ecd31610203bb4b6f6 | /apps/assets/forms/domain.py | 257f5a1ee5f442514cea644af583f87757b9d47f | [] | no_license | https://github.com/Amos-x/Operation | 7bd7ef0582e0e700ff9a40c7d47ab435425185bf | 274ab76ad2af79f47aa01f7b35992eef76d59a29 | refs/heads/master | 2020-03-28T18:18:50.409056 | 2019-04-16T11:13:45 | 2019-04-16T11:13:45 | 148,870,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
# __author__ = Amos
# Email = 379833553@qq.com
# Create_at = 2019-03-05 14:19
# FileName = domain
from django import forms
from django.utils.translation import gettext_lazy as _
from assets.models import Domain, Asset, Gateway
from .user import PasswordAndKeyAuthForm
__all__ = ['DomainForm', 'GatewayForm']
class DomainForm(forms.ModelForm):
assets = forms.ModelMultipleChoiceField(
queryset=Asset.objects.all(), label=_("Asset"), required=False,
widget=forms.SelectMultiple(attrs={
'class': 'select2', 'data-placeholder': _('Select assets')
})
)
class Meta:
model = Domain
fields = ['name', 'comment', 'assets']
def __init__(self, *args, **kwargs):
if kwargs.get('instance', None):
initial = kwargs.get('initial', {})
initial['assets'] = kwargs['instance'].domain_assets.all()
super().__init__(*args,**kwargs)
def save(self, commit=True):
instance = super().save(commit=commit)
assets = self.cleaned_data['assets']
instance.domain_assets.set(assets)
return instance
class GatewayForm(PasswordAndKeyAuthForm):
class Meta:
model = Gateway
fields = [
'name', 'ip', 'port', 'username', 'protocol', 'domain', 'password',
'private_key_file', 'is_active', 'comment'
]
widgets = {
'name': forms.TextInput(attrs={'placeholder': _('Name')}),
'username': forms.TextInput(attrs={'placeholder': _('Username')})
}
help_texts = {
'name': '* required',
'username': '* required'
}
def save(self, commit=True):
""" 因为定义了自定义字段,所以要重写save函数 """
instance = super().save()
password = self.cleaned_data.get('password')
private_key, public_key = super().gen_keys()
instance.set_auth(password=password, private_key=private_key)
return instance
| UTF-8 | Python | false | false | 2,036 | py | 103 | domain.py | 99 | 0.58 | 0.5685 | 0 | 65 | 29.769231 | 79 |
hainingpan/SPT | 7,610,682,080,244 | d1d6078043c9c51b18e98a4b74c8a8a53078c8d2 | dbef1e401d443e17e484f7e0f87d8c9b556bb98f | /MI_LN_CI.py | 326f9c2a466176e79a9ac44302bd42f91f50ae33 | [] | no_license | https://github.com/hainingpan/SPT | cd67222bc513ed475974626b91cd26b089e0464f | fe1b465462fd93ca247dfd5ed56ddec416cba8e7 | refs/heads/master | 2023-07-30T12:38:22.760225 | 2021-09-24T15:27:16 | 2021-09-24T15:27:16 | 353,170,894 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Chern_insulator import *
import matplotlib.pyplot as plt
import argparse
import pickle
import numpy as np
import time
from mpi4py.futures import MPIPoolExecutor
from copy import copy
def run(params_init,subregionA,subregionB,subregionAp,subregionBp,Bp):
params=copy(params_init)
params.measure_all_Born(subregionAp)
if Bp:
params.measure_all_Born(subregionBp)
MI=params.mutual_information_m(subregionA,subregionB)
LN=params.log_neg(subregionA,subregionB)
return MI,LN
if __name__=="__main__":
# if rank==0:
parser=argparse.ArgumentParser()
parser.add_argument('--es',default=100,type=int)
parser.add_argument('--timing',default=False,type=bool)
parser.add_argument('--Lx',default=32,type=int)
parser.add_argument('--Ly',default=16,type=int)
parser.add_argument('--pts',default=100,type=int)
parser.add_argument('--Bp',default=False,type=bool)
args=parser.parse_args()
if args.timing:
st=time.time()
eta_Born_list=[]
MI_Born_list=[]
LN_Born_list=[]
params_init=Params(m=2,Lx=args.Lx,Ly=args.Ly)
executor=MPIPoolExecutor()
mutual_info_ensemble_list_pool=[]
for pt in range(args.pts):
MI_ensemble_list=[]
LN_ensemble_list=[]
inputs=[]
x=sorted(np.random.choice(np.arange(1,args.Lx),3,replace=False))
x=[0]+x
eta=cross_ratio(x,args.Lx)
eta_Born_list.append(eta)
subregionA=[np.arange(x[0],x[1]),np.arange(params_init.Ly)]
subregionB=[np.arange(x[2],x[3]),np.arange(params_init.Ly)]
subregionAp=[np.arange(x[1],x[2]),np.arange(params_init.Ly)]
subregionBp=[np.arange(x[3],args.Lx),np.arange(params_init.Ly)]
inputs=[(params_init,subregionA,subregionB,subregionAp,subregionBp,args.Bp) for _ in range(args.es)]
mutual_info_ensemble_list_pool.append(executor.starmap(run,inputs))
for pt in range(args.pts):
print("{:d}:".format(pt),end='')
st=time.time()
MI_ensemble_list=[]
LN_ensemble_list=[]
for result in mutual_info_ensemble_list_pool[pt]:
MI,LN=result
MI_ensemble_list.append(MI)
LN_ensemble_list.append(LN)
MI_Born_list.append(MI_ensemble_list)
LN_Born_list.append(LN_ensemble_list)
print("{:.1f}".format(time.time()-st))
executor.shutdown()
eta_Born_list=np.array(eta_Born_list)
MI_Born_list=np.array(MI_Born_list)
LN_Born_list=np.array(LN_Born_list)
with open('MI_LN_CI_Born_En{:d}_pts{:d}_Lx{:d}_Ly{:d}_Ap{:s}.pickle'.format(args.es,args.pts,args.Lx,args.Ly,args.Bp*'Bp'),'wb') as f:
pickle.dump([eta_Born_list,MI_Born_list,LN_Born_list],f)
if args.timing:
print('Elapsed:{:.1f}'.format(time.time()-st))
| UTF-8 | Python | false | false | 2,806 | py | 53 | MI_LN_CI.py | 33 | 0.638275 | 0.629366 | 0 | 80 | 34.075 | 138 |
gehuangyi20/random_spiking | 13,632,226,219,867 | b3d78c87acb2af2f2bfe6e00f388db32db93fdfe | 11f810f2cf7d875e2d974ebe703831b7c66822da | /RsNet/compute_adv_diff_vs_tran_cross.py | 95daf237784e4c7e20ffa3241c22dd17112cc982 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/gehuangyi20/random_spiking | 7ca0a5ead3a617dfe3693c9e68bbcdb3cf6b0990 | c98b550420ae4061b9d47ca475e86c981caf5514 | refs/heads/master | 2021-04-09T20:54:17.834219 | 2020-03-21T23:05:00 | 2020-03-21T23:05:00 | 248,879,259 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
import os
import json
import csv
import argparse
import numpy as np
import re
parser = argparse.ArgumentParser(description='create table for adv_diff vs transferability cross methods.')
parser.add_argument('-d', '--dir', help='directory, required', type=str, default=None)
parser.add_argument('-c', '--config', help='config file, default config.json', type=str, default='config.json')
parser.add_argument('-o', '--output', help='output name, default summary', type=str, default='summary_bin')
parser.add_argument('--suffix', help='dataset suffix', type=str, default='')
args = parser.parse_args()
_dir = args.dir
output_file = args.output
config_fp = open(os.path.join(_dir, args.config), "rb")
json_str = config_fp.read()
config_fp.close()
config = json.loads(json_str.decode())
# mkdir
if not os.path.exists(os.path.dirname(os.path.join(_dir, output_file))):
os.makedirs(os.path.dirname(os.path.join(_dir, output_file)))
_bin = []
att = []
for mthd in config:
cur_raw_trans_fp = open(os.path.join(_dir, mthd['transfer']), "r")
cur_transfer_reader = csv.DictReader(cur_raw_trans_fp, dialect='excel-tab')
cur_att = {
"name": mthd['name']
}
cur_att_def = []
cur_data = {}
cur_data_std = {}
cur_pred = {}
cur_pred_std = {}
for transfer_row in cur_transfer_reader:
tmp_def_name = args.suffix + re.sub('[^A-Za-z]+', '', transfer_row['dataset'])
tmp_bin = int(transfer_row['bin'])
_bin.append(tmp_bin)
if tmp_def_name not in cur_att_def:
cur_att_def.append(tmp_def_name)
cur_data[tmp_def_name] = {}
cur_data_std[tmp_def_name] = {}
cur_pred[tmp_def_name] = {}
cur_pred_std[tmp_def_name] = {}
cur_data[tmp_def_name][tmp_bin] = float(transfer_row['trans_rate_mean'])*100
cur_data_std[tmp_def_name][tmp_bin] = float(transfer_row['trans_rate_std'])*100
cur_pred[tmp_def_name][tmp_bin] = float(transfer_row['pred_rate_mean']) * 100
cur_pred_std[tmp_def_name][tmp_bin] = float(transfer_row['pred_rate_std']) * 100
cur_att['def'] = cur_att_def
cur_att['data'] = cur_data
cur_att['data_std'] = cur_data_std
cur_att['pred'] = cur_pred
cur_att['pred_std'] = cur_pred_std
att.append(cur_att)
unique_bin = np.unique(_bin)
for cur_bin in _bin:
cur_fp = open(os.path.join(_dir, output_file + str(cur_bin) + '.csv'), "wb")
cur_pred_fp = open(os.path.join(_dir, output_file + "_pred" + str(cur_bin) + '.csv'), "wb")
cur_fp.write('att'.encode())
cur_pred_fp.write('att'.encode())
for cur_def_name in att[0]['def']:
cur_fp.write(('|' + cur_def_name + '|' + cur_def_name + 'std').encode())
cur_pred_fp.write(('|' + cur_def_name + '|' + cur_def_name + 'std').encode())
cur_fp.write('\n'.encode())
cur_pred_fp.write('\n'.encode())
for cur_att in att:
skip = False
for cur_def_name in cur_att['def']:
if str(cur_att['data'][cur_def_name][cur_bin]) == 'nan':
skip = True
break
if skip:
continue
cur_fp.write(cur_att['name'].encode())
cur_pred_fp.write(cur_att['name'].encode())
for cur_def_name in cur_att['def']:
cur_fp.write(('|' + str(cur_att['data'][cur_def_name][cur_bin]) +
'|' + str(cur_att['data_std'][cur_def_name][cur_bin])).encode())
cur_pred_fp.write(('|' + str(cur_att['pred'][cur_def_name][cur_bin]) +
'|' + str(cur_att['pred_std'][cur_def_name][cur_bin])).encode())
cur_fp.write('\n'.encode())
cur_pred_fp.write('\n'.encode())
cur_fp.close()
cur_pred_fp.close()
| UTF-8 | Python | false | false | 3,743 | py | 69 | compute_adv_diff_vs_tran_cross.py | 53 | 0.580283 | 0.576543 | 0 | 104 | 34.990385 | 111 |
hanskamin/music-inference | 13,709,535,641,081 | 12d676862d020b0f577e811c78fe7ab7ed8f9a3b | 335cc9ec1aa397431616c6a44e815d358fe1c815 | /onto_utils.py | b56bdced450b421885e2c74f8773944a6048b42c | [
"MIT"
] | permissive | https://github.com/hanskamin/music-inference | ab9fcdf4a087618cc1da39007364619835db04a9 | 5b7830561d8538de10a3c8fe5a140b0b7892e604 | refs/heads/master | 2020-05-26T04:18:02.830067 | 2019-06-12T01:53:37 | 2019-06-12T01:53:37 | 188,100,036 | 0 | 0 | MIT | false | 2019-06-07T17:24:01 | 2019-05-22T19:21:00 | 2019-06-07T00:06:46 | 2019-06-07T17:24:00 | 38,348 | 0 | 0 | 2 | Python | false | false | """
This module will query the OWL Ontology based on a user's inputted genre to
select a set of instruments as midi program integers
"""
import owlready2 as owl
from music21 import instrument
def load_ontology():
return owl.get_ontology("root-ontology.owl").load()
def get_genre_map(ontology):
genres = {}
key = 0
for individual in ontology.search(type=ontology.MusicalGenre):
genres.update({key: individual})
key += 1
return genres
def get_instruments(genre, ontology):
programs = []
if genre.label[0] == "Blues":
programs.append(instrument.AcousticGuitar().midiProgram)
programs.append(instrument.Harmonica().midiProgram)
programs.append(instrument.TomTom().midiProgram)
elif genre.label[0] == "Folk":
programs.append(instrument.Banjo().midiProgram)
programs.append(instrument.AcousticBass().midiProgram)
programs.append(instrument.Piano().midiProgram)
elif genre.label[0] == "Rock":
programs.append(instrument.ElectricGuitar().midiProgram)
programs.append(instrument.ElectricBass().midiProgram)
programs.append(instrument.BassDrum().midiProgram)
elif genre.label[0] == "Classical":
programs.append(instrument.Violin().midiProgram)
programs.append(instrument.Oboe().midiProgram)
programs.append(instrument.Flute().midiProgram)
programs.append(instrument.Viola().midiProgram)
elif genre.label[0] == "Country":
programs.append(instrument.AcousticGuitar().midiProgram)
programs.append(instrument.Banjo().midiProgram)
programs.append(instrument.TomTom().midiProgram)
return programs
| UTF-8 | Python | false | false | 1,686 | py | 11 | onto_utils.py | 8 | 0.697509 | 0.691578 | 0 | 47 | 34.87234 | 76 |
kjhcode/2021 | 16,398,185,158,759 | 25dc35f6f4cb7415df59ec51fcb3fdd397947b9a | 3eed03943877231dbbb50c98c6d27af6fa98b387 | /142-한학기내신등급산출.py | 8ad5040da7ea4635f5ea414b4f3f8469b4fd7ad6 | [] | no_license | https://github.com/kjhcode/2021 | 22bbbc552274ae0bb6fd4d27b151f6b08260ae16 | 0a57c7c2456f2a022bd533ce0fb92992b31cef23 | refs/heads/main | 2023-08-28T17:12:53.336111 | 2021-10-05T08:25:06 | 2021-10-05T08:25:06 | 413,659,546 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print("[내신 등급 계산 프로그램]")
myGrade = 0
totUnit = 0
totGrade = 0
subjectCount = int(input("과목 수: "))
for i in range(subjectCount):
print(i+1, end=" ")
unit = int(input("과목 단위 수: "))
grade = int(input("석차 등급: "))
totUnit += unit #-> 단위수합=단위수합+단위수
totGrade += grade*unit #-> totGrade=등급*단위수
myGrade = totGrade/totUnit
print("내신 등급은", myGrade, "입니다.")
if myGrade <= 3:
myLevel = "상위권"
elif myGrade <= 6:
myLevel = "중위권"
else:
myLevel = "하위권"
print("수준은",myLevel,"입니다.")
| UTF-8 | Python | false | false | 688 | py | 9 | 142-한학기내신등급산출.py | 7 | 0.526502 | 0.515901 | 0 | 20 | 25.7 | 64 |
malhotraa/aoc2020 | 1,700,807,080,130 | 49dcd2ffdcc7c08f7c45b9b08117b71c5d1f71ef | 607eb192347f05c0af64912724f29de8ae47229f | /day12/solution.py | 19b35055aeb395cb7d85b68773d1e672ab3f63e9 | [] | no_license | https://github.com/malhotraa/aoc2020 | 5ca5862d0158c4289b33a23db7fc11616d78328b | 0ad03c54f273604ac9ff091e9d43a209815b9861 | refs/heads/main | 2023-02-04T17:12:43.709446 | 2020-12-25T15:31:46 | 2020-12-25T15:31:46 | 318,332,585 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | with open('input.txt') as f:
lines = f.read().split('\n')
def normalize_orientation(theta):
while theta <= -180:
theta += 360
while theta > 180:
theta -= 360
return theta
def part1_manhattan(lines):
dir_to_xy = {
'N': (0, -1),
'S': (0, 1),
'E': (1, 0),
'W': (-1, 0),
}
orientation_to_xy = {
0: (1, 0),
90: (0, -1),
180: (-1, 0),
-90: (0, 1),
}
rotation_to_theta = {
'L': 1,
'R': -1,
}
x, y = 0, 0
theta = 0
for line in lines:
action = line[0]
value = int(line[1:])
if action == 'F':
mul_x, mul_y = orientation_to_xy[theta]
x += mul_x * value
y += mul_y * value
elif action in set(['N', 'S', 'E', 'W']):
mul_x, mul_y = dir_to_xy[action]
x += mul_x * value
y += mul_y * value
elif action in set(['L', 'R']):
theta += (rotation_to_theta[action] * value)
theta = normalize_orientation(theta)
return abs(x) + abs(y)
def rotate_xy(x, y, theta):
assert -270 <= theta <= 270
if theta == 90 or theta == -270:
return y, -x
elif theta == 180 or theta == -180:
return -x, -y
elif theta == 270 or theta == -90:
return -y, x
def part2_manhattan(lines):
dir_to_xy = {
'N': (0, 1),
'S': (0, -1),
'E': (1, 0),
'W': (-1, 0),
}
rotation_to_theta = {
'L': -1,
'R': 1,
}
x, y = 0, 0
way_x, way_y = 10, 1
for line in lines:
action = line[0]
value = int(line[1:])
if action == 'F':
x += way_x * value
y += way_y * value
elif action in set(['N', 'S', 'E', 'W']):
mul_x, mul_y = dir_to_xy[action]
way_x += mul_x * value
way_y += mul_y * value
elif action in set(['L', 'R']):
way_x, way_y = rotate_xy(way_x, way_y, rotation_to_theta[action] * value)
return abs(x) + abs(y)
print('part1 manhattan: ', part1_manhattan(lines))
print('part2 manhattan: ', part2_manhattan(lines)) | UTF-8 | Python | false | false | 2,174 | py | 29 | solution.py | 25 | 0.436983 | 0.396504 | 0 | 83 | 25.204819 | 85 |
SimonGuillot/scilang | 11,287,174,065,323 | af3723071a9d8777167a1fe12faf9d9f12598cbd | e25a704fd0e751369b662daf5876f3c306839afc | /S3 morpho/main.py | 1d9fb27ad62a2d4cfeb536218d9a7f50c8645b78 | [] | no_license | https://github.com/SimonGuillot/scilang | c25d24426c69beae50e4a7583c1d2fb71e2e3b3d | f94d81a3fc6435bcb99a6ed89964ec7543d7f85b | refs/heads/master | 2020-04-24T15:12:08.770432 | 2019-04-14T16:39:46 | 2019-04-14T16:39:46 | 172,056,468 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import yaml
def get(path):
test_value=path[0]
print(test_value)
#print(dictionnaire[test_value]) #cette étape ne fonctionne pas, pourquoi pas comme dico
"""def compare(cont_end,cont_beg)
intersection = cont_end.intersection(cont_beg)
if intersection != null :
return intersection"""
def count_context(dictionnaire):
n_context=len(yourdict)
return n_context
################################################################## LES SPLITS ICI
#string récupérée depuis clé dictionnaire, un contexte
string_travail="{u't-': u'^(.*[fs][iyEe92a])t$'}"
def split_trans_cont(string):
transformation,contexte=string.split(":")
return transformation, contexte
#split char à char sur transfo et set à set sur contexte ?
def split(a):
liste_trans=list(transfor)
for i in range (len(liste_trans)):
if liste_trans[i]=='-' :
pre_trans=liste_trans[0:i]
post_trans=liste_trans[i+1:len(liste_trans)]
print(pre_trans)
print(post_trans)
return pre_trans,post_trans
"""
def apply_trans(transformation, contexte):
"""
##############################################
def main() :
#ouverture table yaml
stream = open('ca.yaml', 'r')
yaml.dump(yaml.load(stream))
dictionnaire=stream
#chemin à parcourir
path=[(u'ii1P', u'pP'), (u'pP', u'is1S'), (u'is1S', u'ii1S'), (u'ii1S', u'ppMP'), (u'ppMP', u'ii1P')]
print(path[0]) #erreur en 'none'
#begin
#pathing
"""
get(path)
if n_context == 1 : #nécessité de considérer différement les
split(contexte) #cas où on a plusieurs contextes pour pouvoir les enregistrer ensemble
liste_result.append(compare(cont_end,cont_beg))
elif n_context > 1 :
context_step_n
for i in range len(context_step_n):
split(contexte)
result_step_n.append(compare(cont_end,cont_beg))
liste_result.append(result_step_n)
return list
"""
#split and compare
#end
########################################
if __name__ == "__main__":
win = None
main()
#########################################
"""
#itération dictionnaire
for key, value in d.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
print(key,sub_key,sub_value)
else:
print(key,value)
with open("fichier.yaml", 'r') as stream:
try:
print(yaml.load(stream))
yaml.load(stream)
yaml.dump(yaml.load(stream))
except yaml.YAMLError as exc:
print(exc)
print(mes_tuples)
def searchStringInYaml(fichier,string):
with open(filename, 'r') as stream:
content = yaml.load(stream)
if string in content:
print string
return 1
else:
return 0
stream.close()
stream = file('fichier.yaml', 'r')
dict = yaml.load(stream)
for key in dict:
if key in dict == "ai1P":
print (key), dict[key]
key = 'ai1P'
for key, value in yaml.load(open('fichier.yaml'))[('ai1P', 'ai1S')].iteritems():
print (key)
print(value)
y = yaml.load(open("fichier.yaml", "w"))
print(y)
#tentative de set à set
x="(.*[ptkbdgfsSvzZmnJjrwHiyEe926auOoêûâô][ptkbdgfsSvzZmnJjlr])E$"
lx=list(x)
print(lx)
print(len(lx))
liste_organisee=list()
en_tete=list()
liste_organisee.append(en_tete)
for i in range(len(lx)) :
if i == "[" :
en_tete.append(i)
print(en_tete)
def split():
"variables"
x="(.*[ptkbdgfsSvzZmnJjrwHiyEe926auOoêûâô][ptkbdgfsSvzZmnJjlr])E$"
lx=list(x)
en_tete=list()
test1=list()
"==============="
for i in range(len(lx)) :
if lx[i]== '(' :
a1=list()
split()
string_travail="{u't-': u'^(.*[fs][iyEe92a])t$'}"
def split_trans_cont(string):
transformation, contexte=string.split(":")
return transformation, contexte
print(split_trans_cont(string_travail))
#print(transformation)
def split_contexte(contexte):
liste_travail=list(contexte)
print(liste_travail)
def main():
string_travail="{u't-': u'^(.*[fs][iyEe92a])t$'}"
split_trans_cont(string_travail)
print(split_trans_cont(string_travail))
print(transformation)
split_contexte(split_trans_cont(string_travail))"""
| UTF-8 | Python | false | false | 4,384 | py | 7 | main.py | 2 | 0.576932 | 0.569594 | 0 | 174 | 24.063218 | 114 |
EtavaresMixes/drappi-CMS---django | 13,804,024,904,090 | 5d79b0c61caaf2e1f69b0cc8a5d2a3bffc90bbec | 23658085c5eab02a86ff866a2127803622bd8a8d | /users/views.py | 62496c5202782621839deeca81f71ab37e4e48d0 | [] | no_license | https://github.com/EtavaresMixes/drappi-CMS---django | 5014875d737f0925a005b05d5b1a48fbb74a416d | 9c1223aa8f0d4a2cfe77423816f9630a63e0af0e | refs/heads/master | 2023-03-12T09:07:55.510351 | 2021-03-05T12:43:17 | 2021-03-05T12:43:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from . forms import UserRegisterForm, LoginForm, CadastraCNPJ
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from clientes.models import Cliente
from . decorators import usuario_nao_autenticado, usuario_permitido
from django.views.generic import DetailView, ListView
from pedidos.models import Pedido
from django.contrib.auth.views import LoginView
from django.contrib.auth import authenticate, login
from django.core.paginator import Paginator
@usuario_nao_autenticado
def register(request):
title = 'CADASTRO'
slang = '"Registre sua conta!"'
form = UserRegisterForm()
cnpj_form = CadastraCNPJ()
if request.method == 'POST':
form = UserRegisterForm(request.POST)
cnpj_form = CadastraCNPJ(request.POST)
if form.is_valid() and cnpj_form.is_valid():
user = form.save()
cnpj = cnpj_form.cleaned_data['cnpj']
username = form.cleaned_data.get('username')
email = form.cleaned_data.get('email')
first_name = form.cleaned_data.get('first_name')
last_name = form.cleaned_data.get('last_name')
telefone = cnpj_form.cleaned_data.get('telefone')
empresa = cnpj_form.cleaned_data.get('empresa')
group = Group.objects.get(name='clientes')
user.groups.add(group)
Cliente.objects.create(user=user, cnpj=cnpj, email=email,
nome=first_name, sobrenome=last_name,
telefone=telefone, empresa=empresa
)
messages.success(request, f'Conta criada para {username}!')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {
'form': form,
'cnpj_form': cnpj_form,
'title': title,
'slang': slang})
@login_required(login_url='homepage')
@usuario_permitido(allowed_roles=['clientes'])
def profile(request):
cliente = Cliente.objects.get(id=request.user.id)
pedidos = cliente.pedido_set.all()
total_de_pedidos = pedidos.count()
title = 'Perfil'
slang = '"Dados pessoais do cliente!"'
paginator = Paginator(pedidos, 10)
page = request.GET.get('p')
pedidos = paginator.get_page(page)
context = {
"clientes": cliente,
"pedidos": pedidos,
'title': title,
'slang': slang,
"total_de_pedidos": total_de_pedidos,
}
return render(request, 'users/profile.html', context)
def login_page(request):
title = 'Login'
slang = '"Insira seu dados pessoais!"'
form = LoginForm(request.POST or None)
context = {
'title': title,
'slang': slang,
'form': form}
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
# Redirect to a success page.
return redirect('/')
# context['form'] = LoginForm()
else:
context['login_message'] = "Senha inválida!!"
return render(request, 'users/login.html', context)
return render(request, 'users/login.html', context)
| UTF-8 | Python | false | false | 3,519 | py | 74 | views.py | 37 | 0.627629 | 0.627061 | 0 | 101 | 33.831683 | 74 |
jpur3846/dfe-contractors | 5,523,327,969,872 | 32c7eba5491efe4e8fbb3e0b2c6fcea811185e3b | e3e311a7a7a86d97799ee4ff58185e155d745106 | /contractors/con/apps.py | 068c2b3bc2078ecc92b68cdef2d015bd07680a45 | [] | no_license | https://github.com/jpur3846/dfe-contractors | d8938edc1e6b231b7fd132bdf9848b451bb8fdaf | 3cf34751d588cc6f0914b504a7c1c7f3e11bc385 | refs/heads/master | 2022-11-24T16:37:38.297265 | 2020-07-30T13:36:21 | 2020-07-30T13:36:21 | 276,302,265 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class ConConfig(AppConfig):
name = 'con'
| UTF-8 | Python | false | false | 80 | py | 118 | apps.py | 85 | 0.7375 | 0.7375 | 0 | 4 | 19 | 33 |
tranhoangkhuongvn/algo_gym | 12,489,764,926,698 | 87360ad7c3a6502a6025062788f9cfbd1230acfb | 2ce595e4cf76dca58fce8673325418cc13696e4d | /EPI/4-2-swap-bits.py | 9665f527680f31e9869f37ae23d345d4379cd0ba | [] | no_license | https://github.com/tranhoangkhuongvn/algo_gym | 4ae9d9570da5b0ef78594fd799734da829e02271 | 049af6be6a042c0f4f7a6532078761172ec212ae | refs/heads/master | 2020-09-02T15:02:50.246948 | 2020-01-19T00:24:50 | 2020-01-19T00:24:50 | 219,245,440 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Write a function takes a 64-bit integer and swap bits at indices i and j
#LSB is at rightmost bit
def swap_bits(n, i, j):
bit0 = (1 & (n >> i))
bit1 = (1 & (n >> j))
if bit0 != bit1:
if bit0:
#reset bit0 to 0, set bit1 to 1
n = n & (~(1 << i))
n = n | (1 << j)
else:
n = n | (1 << i)
n = n & (~(1 << j))
print(bin(n))
return n
print(swap_bits(73, 1, 6))
| UTF-8 | Python | false | false | 387 | py | 28 | 4-2-swap-bits.py | 27 | 0.503876 | 0.449612 | 0 | 19 | 19.105263 | 73 |
krishnakalyan3/spark_streaming | 1,194,000,956,084 | 256b2b17fe3e9f80606e46dae04b2deac346dbe0 | 81087a55da2f8c96c53a71fc9db577269da40e44 | /src/examples/03_agg.py | 87ce1cec6f05c304f691f3f4e4527f3e680ffe1c | [] | no_license | https://github.com/krishnakalyan3/spark_streaming | bf5a0b6fa189658c9476638789476c2dbf2882df | 57dd570981fd1ac608caa1d4496a5cc6356bf950 | refs/heads/master | 2020-03-14T10:46:33.037695 | 2018-05-07T13:30:57 | 2018-05-07T13:30:57 | 131,575,391 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyspark.sql import SparkSession
from pyspark.sql.types import StructType
from pyspark.sql.types import IntegerType, StringType, TimestampType
from pyspark.sql.types import StructField
from pyspark.sql import functions as F
from pyspark.sql.functions import split
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("StructuredNetworkWordCountWindowed")\
.getOrCreate()
# Create DataFrame representing the stream of input lines from connection to host:port
lines = spark\
.readStream \
.format('socket')\
.option('host', 'localhost')\
.option('port', 9999)\
.option('includeTimestamp', 'true')\
.load()
data_frame = lines.select(
(split(lines.value, ' ')).getItem(0).cast('integer').alias("time"),
(split(lines.value, ' ')).getItem(1).cast('double').alias("moisture"),
(split(lines.value, ' ')).getItem(2).cast('float').alias("temp"),
(split(lines.value, ' ')).getItem(3).cast('float').alias("battery"),
(split(lines.value, ' ')).getItem(3).cast('double').alias("battery"),
lines.timestamp
)
# Select
df_select = data_frame.select(data_frame.time, data_frame.temp)
# Filter
df_filter = df_select.filter(df_select.temp < 0)
# Use this with watermark
# Group
#df_group = df_select.groupBy(df_filter.time).sum()
query = df_filter.writeStream \
.outputMode('append') \
.format('console') \
.start()
query.awaitTermination()
# $SPARK_HOME/bin/spark-submit 03_agg.py
| UTF-8 | Python | false | false | 1,591 | py | 7 | 03_agg.py | 6 | 0.629164 | 0.621622 | 0 | 50 | 30.82 | 90 |
kejukeji/bourjois_sub | 16,947,940,969,334 | 37fc23b8f9af7d3fd6546108950c684467d1fa87 | c24e1b0f9fc43593aaf35358be58bccb78ee67f3 | /bourjois/models/__init__.py | 65f21fdd7138ff3a11ec1bbd31636e43e428547d | [] | no_license | https://github.com/kejukeji/bourjois_sub | 5debebd2d39e72e8a291a301aea887b83e767eb2 | d8614345ae8c3ede4e4069a9d3883e06a3adff23 | refs/heads/master | 2016-09-03T07:15:52.021348 | 2014-05-16T16:05:16 | 2014-05-16T16:05:16 | 19,665,950 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: UTF-8
from .coupon import *
from .times import * | UTF-8 | Python | false | false | 58 | py | 22 | __init__.py | 11 | 0.706897 | 0.689655 | 0 | 3 | 18.666667 | 21 |
sh2MAN/newsgather | 9,440,338,160,984 | 3a230644d01dd10c07ed468739a3e4363d8f5d07 | 09c9723fe97a4e5b92207de47fdf5dd717710e97 | /core/utils.py | be0139b04d3cd22a2678e7303aefd7290ae51785 | [
"MIT"
] | permissive | https://github.com/sh2MAN/newsgather | 7107e208fa6ef9255624fbc53077630dcac51edb | c6ed86bc0c4568ffa19b988165643033eff66158 | refs/heads/main | 2023-03-30T10:06:20.989662 | 2021-03-25T21:06:46 | 2021-03-25T21:06:46 | 349,650,242 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from loguru import logger
from starlette.requests import Request
logger.add(
'logging.log', format="{name} {message}", level="INFO", rotation="5MB"
)
def get_db(request: Request):
"""Возвращает текущую сессию."""
return request.state.db
| UTF-8 | Python | false | false | 275 | py | 15 | utils.py | 10 | 0.702381 | 0.698413 | 0 | 11 | 21.909091 | 74 |
vadim-ivlev/text-processor | 16,793,322,158,589 | a0657ab1e0024c6542dba17e8575dbe900242028 | eb6de9d204ae12b8066cf437ec6d0bbedbdb5809 | /tests/test-text-processor.py | 26f2bfc0f615238cd4061fa16f7d3a622a344ccf | [] | no_license | https://github.com/vadim-ivlev/text-processor | 9c4b13015c60f022dddf341f3dd59ca30983923e | f129b08560ab97c0590d06a675e202638b73705c | refs/heads/master | 2023-01-20T18:44:14.062049 | 2020-11-26T15:03:37 | 2020-11-26T15:03:37 | 293,517,049 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | sample_text = """
<p>Глава государства начал встречу сразу с проблемных вопросов: в регионе сокращается сельхозпроизводство
на фоне солидного роста по стране. Голубев выразил надежду, что в этом году тенденцию удастся переломить.
Президент заметил: если необходима поддержка, то нужно сформулировать просьбы. "И по стройке: сокращаются
и объемы строительства. Здесь что?" - уточнил он. Собеседник рассказал о заметном сокращении индивидуального
жилищного строительства. В целом строительная программа "плюсует", но жилье "минусует", признал Голубев.
"Поэтому это стало нашим приоритетом", - заверил региональный лидер.</p><p>"Вы человек очень опытный, знаете,
мы последнее время, последние годы большое внимание обращаем на проблемы демографии", - заметил Путин.
По его словам, после определенного подъема у нас сейчас наблюдается сокращение численности постоянного населения.
"Но в Ростовской области, несмотря на то, что это южный, благоприятный климатически регион,
сокращение происходит даже в большем объеме, чем в среднем по стране", - сказал президент и назвал возможные
причины: недостаточное количество врачей и мест в детсадах. "Это очень важный фактор для того, чтобы люди себя
чувствовали уверенно и комфортно", - объяснил глава государства.</p><p>Важен и такой показатель, как уровень
безработицы. "Ясно, что это сегодня одна из главных проблем в стране, это совершенно очевидно", - заметил Путин.
Но Ростовская область развита и в промышленном отношении, и в отношении возможностей для сельского хозяйства.
"Конечно, нужно обратить внимание на рынок труда", - указал президент.</p><p>"У нас действительно "выскочила"
реально безработица - 96 с лишним тысяч человек (4,6 процента) при том, что до определенного времени уровень
безработицы был не выше или на уровне среднероссийского, - признал губернатор. - Мы предпринимаем сейчас меры
для того, чтобы максимально запустить те механизмы, которые позволяют людям работать". "Мы будем искать
новые решения. Я думаю, что для нас это важнейшая задача, усилия будем прилагать для того, чтобы здесь ситуацию
переломить", - заверил он.</p><div class="incut">В Ростовской области минимальная долговая нагрузка, низкий
уровень аварийного жилья и очень хорошие перспективы с точки зрения инвестпроектов </div><p>Глава государства
также заявил, что опережающий темп роста промышленного производства - заслуга самого Голубева и его команды.
За первое полугодие, конечно, есть спад, но он меньше, чем по стране. В нормальном состоянии и региональные финансы,
в области минимальная долговая нагрузка, низкий уровень аварийного жилья и очень хорошие перспективы с точки
зрения инвестпроектов, оценил Путин. Президент призвал поддержать усилия бизнеса по созданию новых, хорошо
оплачиваемых, качественных и современных высокотехнологичных рабочих мест. Голубев также сообщил, что
селяне прекрасно сработали по уборке ранних зерновых, и президент одобрил предложение наградить их.</p><div
class="Section">Между тем</div><p>Состоялся телефонный разговор Владимира Путина с президентом Республики
Беларусь Александром Лукашенко, сообщили в пресс-службе Кремля. Александр Лукашенко проинформировал о предпринимаемых
мерах в целях нормализации обстановки в стране. Затрагивалась также тематика двустороннего сотрудничества в вопросах
противодействия коронавирусной инфекции.</p>
"""
# sample_text = """
# Медведев возглавит комиссию по Арктике. Медведев возглавит комиссию по Арктике.
# Президент подписал указ о создании межведомственной комиссии Совбеза по вопросам обеспечения интересов РФ в Арктике. Возглавит комиссию заместитель председателя Совбеза, сейчас эту должность занимает Дмитрий Медведев.
# """
# sample_text = """
# Медведев возглавит комиссию по Арктике.
# """
import os, sys
sys.path.insert(1, os.path.dirname(sys.path[0]))
from deploy import text_processor
import simplejson as json
o = text_processor.process_text(sample_text)
print(json.dumps(o,indent=2, ensure_ascii=False))
| UTF-8 | Python | false | false | 6,936 | py | 44 | test-text-processor.py | 22 | 0.805625 | 0.803867 | 0 | 48 | 81.958333 | 219 |
tgandor/meats | 2,877,628,122,579 | 2fbfa5ce53f95d2e45735c0c0d85dfb250a313f9 | 41a4887a52afe81f203d0917c5ef54ccbe2389fe | /opencv_py/deskew.py | 696a5c1eab2d86a55ca5ace674b8aec53a858fff | [] | no_license | https://github.com/tgandor/meats | 2efc2e144fc59b2b99aeeaec5f5419dbbb323f9b | 26eb57e49752dab98722a356e80a15f26cbf5929 | refs/heads/master | 2023-08-30T20:35:47.949622 | 2023-08-25T13:26:23 | 2023-08-25T13:26:23 | 32,311,574 | 13 | 9 | null | false | 2022-06-22T20:44:44 | 2015-03-16T08:39:21 | 2022-01-06T00:31:18 | 2022-06-22T20:44:43 | 8,543 | 11 | 5 | 0 | Python | false | false | #!/usr/bin/env python
import math
import cv2
import sys
import os
from fractions import Fraction
def cv_size(img):
return tuple(img.shape[1::-1])
def cv_center(img):
w, h = cv_size(img)
return w/2, h/2
def get_screen_res():
if sys.platform.startswith('linux'):
import os
lines = [line for line in os.popen('xrandr').read().split('\n') if line.find('*') != -1]
return tuple(map(int, lines[0].split()[0].split('x')))
else:
try:
from win32gui import GetDesktopWindow, GetWindowRect
return tuple(GetWindowRect(GetDesktopWindow())[2:])
except ImportError:
pass
return 800, 600
SCREEN_WIDTH, SCREEN_HEIGHT = get_screen_res()
def show_fit(img, name='preview', expand=False, rotate=False):
# partially deprecated: cv2.WINDOW_NORMAL flag
w, h = cv_size(img)
W, H = SCREEN_WIDTH, SCREEN_HEIGHT
if w <= W and h <= H and not expand:
to_show = img
elif w > W or h > H:
if rotate and min(Fraction(W, w), Fraction(H, h)) < min(Fraction(W, h), Fraction(H, w)):
img = cv2.flip(cv2.transpose(img), 0)
w, h = h, w
if h * W > H * w:
w1 = w * H / h
h1 = H
else:
w1 = W
h1 = h * W / w
to_show = cv2.resize(img, (w1, h1))
else: # expand ...
raise NotImplementedError('Cannot expand preview image')
scale = 1.0
while True:
cv2.imshow(name, to_show)
key = cv2.waitKey(1500)
if key == -1:
break
char_code = key % 256
if char_code == ord(' '):
# 'pause'
cv2.waitKey(0)
break
if char_code == ord('+'):
scale *= 2
to_show = cv2.resize(img, (int(w1*scale), int(h1*scale)))
continue
if char_code == ord('-'):
scale /= 2
to_show = cv2.resize(img, (int(w1*scale), int(h1*scale)))
continue
if char_code == ord('q'):
exit()
def process(filename):
print('Processing {0}...'.format(filename))
img = cv2.imread(filename)
print(cv_size(img))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# _, region = cv2.threshold(img, 128.0, 255.0, cv2.THRESH_BINARY_INV)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLinesP(edges, rho=1, theta=math.pi/180.0, threshold=80, minLineLength=60, maxLineGap=10)
n = len(lines[0])
for x1, y1, x2, y2 in lines[0]:
cv2.line(gray, (x1, y1), (x2, y2), (0, 255, 0), 2)
angles = sorted([math.atan2(y2-y1, x2-x1) for x1, y1, x2, y2 in lines[0]])
print("There's {0} lines.".format(n))
middle_angle = angles[n / 2] * 180/math.pi
print('The middle angle is: {0}'.format(middle_angle))
if -5.0 < middle_angle < 5.0:
if middle_angle > 0.125 or middle_angle < -0.125:
img_rotated = cv2.warpAffine(
img,
cv2.getRotationMatrix2D(cv_center(img), middle_angle , 1.0),
cv_size(img),
borderMode=cv2.BORDER_CONSTANT,
borderValue=(255, 255, 255)
)
file_, extension = os.path.splitext(filename)
cv2.imwrite('_'+file_+'_'+extension, img_rotated)
else:
print('The angle is too small. No action taken.')
else:
print('The angle is too radical. No action taken.')
show_fit(edges, rotate=False)
show_fit(gray, rotate=False)
def main():
if len(sys.argv) < 2:
print('Usage: {0} <image_with_text_file>...'.format(sys.argv[0]))
map(process, sys.argv[1:])
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,698 | py | 1,039 | deskew.py | 795 | 0.540562 | 0.50027 | 0 | 120 | 29.816667 | 109 |
cogitare-ai/cogitare | 8,340,826,508,456 | f64224361bdee450421cf74b6a72eb3767a3486e | aa60d8aa60a24e34abb155920c454c3768e22a6f | /tests/test_plugins/test_plot.py | 8701ba0977cbeacd05650b4c55dc7393bbb87d60 | [
"MIT"
] | permissive | https://github.com/cogitare-ai/cogitare | fc1db0fe885b7bd8fe4dac0876910365278f3fb0 | fa99b8ef30e2f74e16fb542f2992582d1bd3ac2c | refs/heads/master | 2021-01-25T01:15:22.892139 | 2019-09-24T11:52:08 | 2019-09-24T11:52:08 | 94,737,131 | 93 | 9 | MIT | false | 2018-02-01T15:25:39 | 2017-06-19T04:41:48 | 2017-12-14T21:11:06 | 2018-02-01T15:25:38 | 2,838 | 1 | 0 | 20 | Python | false | null | import unittest
import os
import tempfile
import pytest
from cogitare.plugins import PlottingMatplotlib
class TestPlottingMatplotlib(unittest.TestCase):
def setUp(self):
self.p = PlottingMatplotlib()
def test_plot(self):
self.p.add_variable('test', 'Test')
with pytest.raises(KeyError) as info:
self.p()
self.assertIn('test', str(info.value))
self.p(test=1)
self.p(test=2)
self.p(test=3)
self.p(test=2)
self.p(test=[1, 2, 3, 4, 5])
self.p(test=0)
self.p(test=-1)
def test_plot_with_std(self):
self.p.add_variable('test', 'Test', use_std=True)
self.p(test=1)
self.p(test=[1, 2, 3, 4])
self.p(test=2)
self.p(test=2)
def test_save_img(self):
f = tempfile.NamedTemporaryFile()
name = f.name + '.png'
p = PlottingMatplotlib(file_name=name)
p.add_variable('test', 'Test', use_std=True)
p(test=1)
p(test=[1, 2, 3, 4])
p(test=2)
f.flush()
self.assertGreater(os.path.getsize(name), 0)
| UTF-8 | Python | false | false | 1,116 | py | 70 | test_plot.py | 46 | 0.55914 | 0.536738 | 0 | 48 | 22.25 | 57 |
nregnault/lsst_cadence | 8,890,582,337,640 | b72757f00dcf38615bf9a49cef5c510fed734ad9 | f74c78ba3efe9668899980b022a6d66aa1f26044 | /sncadence/summary.py | 9fe49e4d62dca3c7587b5397bb1895ad0d161ec8 | [] | no_license | https://github.com/nregnault/lsst_cadence | cee16b73647884ea29228ff0f21f04c17e5753e7 | 3cffb35250276967b2df1ff609329193e835c886 | refs/heads/master | 2021-07-25T20:59:23.238328 | 2021-07-06T01:16:58 | 2021-07-06T01:16:58 | 207,244,875 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #multiplex cross_prod group_by "analyze[-1]"
import numpy as np
r = get_input()
print '*' * 72
to_merge = glob_parent('sim_history.npy', 'analyze')
print to_merge
data = [np.load(nm) for nm in to_merge]
# merge the history ntuples into a big one
d = np.hstack(data)
# extract just the final state from the NTuples
d_final_state = np.rec.fromrecords([dd[-1].tolist() for dd in data],
names = d.dtype.names)
# dump into the summary segment
combined_sim_history_fn = get_data_fn('combined_sim_history.npy')
np.save(combined_sim_history_fn, d)
final_state_sim_history_fn = get_data_fn('final_state_sim_history.npy')
np.save(final_state_sim_history_fn, d_final_state)
# summary plots
summary_plot_with_labels = get_data_fn('nsn_vs_z_labels.png')
cmd = ['cadence_plots.py', '-o', summary_plot_with_labels, '--title', '$N_{SN} vs. z$ [%s]' % r[0], final_state_sim_history_fn]
logged_subprocess(cmd)
summary_plot_no_labels = get_data_fn('nsn_vs_z_nolabels.png')
cmd = ['cadence_plots.py', '-o', summary_plot_no_labels, '--nolabels', '--title', '$N_{SN} vs. z$ [%s] % r[0]', final_state_sim_history_fn]
logged_subprocess(cmd)
seg_output = r
| UTF-8 | Python | false | false | 1,187 | py | 61 | summary.py | 56 | 0.663016 | 0.657961 | 0 | 40 | 28.675 | 139 |
MrCubanfrog/OHappening | 16,011,638,105,224 | 2aa1254c17d7f1ab019fe7e03a3cf7796533a266 | 2be0e7024f59563dbe8aab9e5e31ead9128562cf | /ohappening/ohappening.py | da827d877dd54fbc44b1eac4bf71f80b93d6cb8d | [
"MIT"
] | permissive | https://github.com/MrCubanfrog/OHappening | 9926a1e4aa5648cda796a9859a967c97f20737ff | a7ea958ec96baa4cac5b8ae42cf53ae81d8f2e4b | refs/heads/master | 2020-04-17T19:43:28.779747 | 2019-11-07T14:22:01 | 2019-11-07T14:22:01 | 166,876,039 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Main module for starting the OHappening application. Run the start function to start the application.
FEATURE LIST
------------ Started Done
-Project initialization and basics [X] [X]
-Working calendar [X] [X]
-Clock [X] [X]
-Various timers [X] [X]
-Syncing with google calendar [X] [X]
-HSL widget [X] [ ]
-Event timer Widget [ ] [ ]
Last updated: 11.02.2019, Ilmo Salmenperä
"""
import sys
import pkg_resources
import logging
from datetime import datetime
from PyQt5.QtWidgets import QMainWindow, QApplication, QGridLayout, QWidget
from PyQt5.QtCore import QTimer
from ohappening.event import Event, EventWidget
from ohappening.eventdescriptor import EventDescriptorWidget
from ohappening.eventlist import EventListWidget
from ohappening.pageheader import PageHeaderWidget
from ohappening.hslwidget import HSLWidget
from ohappening.eventtimer import EventTimerWidget
from ohappening.calendarmanager import CalendarManager
from ohappening.config import CALENDAR_CONFIG_JSON
class OHappenWidget(QWidget):
"""
Class for containing all widgets for OHappening.
"""
def __init__(self, parent, logger):
super().__init__(parent)
self.logger = logger
self.logger.info('Creating Grid layout')
self.layout = QGridLayout(self)
self.layout.setSpacing(0)
self.logger.info('Creating Widgets')
self.page_header_widget = PageHeaderWidget(self, self.logger)
self.event_list_widget = EventListWidget(self, self.logger)
self.event_descriptor_widget = EventDescriptorWidget(self, self.logger)
self.hsl_widget = HSLWidget(self, self.logger)
self.event_timer_widget = EventTimerWidget(self, self.logger)
self.logger.info('Creating Managers')
self.calendar_managers = []
for calendar_config in CALENDAR_CONFIG_JSON['calendars']:
self.calendar_managers.append(CalendarManager(calendar_config, logger))
self.layout.addWidget(self.page_header_widget, 0, 0, 1, 5)
self.layout.addWidget(self.event_list_widget, 1, 0, 4, 3)
self.layout.addWidget(self.hsl_widget, 1, 3, 1, 2)
self.layout.addWidget(self.event_descriptor_widget, 2, 3, 2, 2)
self.layout.addWidget(self.event_timer_widget, 4, 3, 1, 2)
self.setLayout(self.layout)
self.logger.info('Creating Timer')
start_clock_timer = QTimer(self)
start_clock_timer.setSingleShot(True)
start_clock_timer.timeout.connect(self.startMainTimer)
start_clock_timer.start(1000 * (60 - datetime.now().second))
self.updateCalendarElements()
def startMainTimer(self):
"""
Function for starting the main clock counter
"""
self.minute_timer = QTimer(self)
self.minute_timer.timeout.connect(self.minuteChanged)
self.minute_timer.start(60 * 1000)
self.minuteChanged()
def minuteChanged(self):
"""
Function that will be called every time the minute in this computer changes
"""
self.logger.info("Minute Change!")
self.updateCalendarElements()
def updateCalendarElements(self):
"""
Function that updates all calendar elements and fetches new elements from the calendars
"""
events = []
for calendar in self.calendar_managers:
events.extend(calendar.fetchEvents())
self.page_header_widget.updatePageHeader()
self.event_list_widget.updateEvents(events)
self.event_descriptor_widget.setNewEventToDescriptor()
self.hsl_widget.updateHslWidget()
class OHappenWindow(QMainWindow):
"""
Class for containing all functionality in OHappening. The heart of it all.
"""
def __init__(self, screen_size, debug):
super().__init__()
self.initLogging(debug)
self.setStyleSheet('background-color : white')
self.logger.info('Initializing OHappenWindow variables')
OHAPPENING_VERSION = pkg_resources.require("OHappening")[0].version
self.title = "OHappening {0}".format(OHAPPENING_VERSION)
self.setWindowTitle(self.title)
self.logger.info('Adding OHappenWidget to MainWindow')
self.setCentralWidget(OHappenWidget(self, self.logger))
self.centralWidget().layout.setContentsMargins(0, 0, 0, 0)
self.logger.info('Program initialized. Showing fullscreen')
self.showFullScreen()
def initLogging(self, debug):
"""
Initialize all logging related stuff.
"""
self.logger = logging.Logger('OHappening Log')
ch = logging.StreamHandler()
if debug:
self.logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
self.logger.debug("Logger initialized")
def start(debug = False):
"""
Starting function for the application. Creates the QApplication and such.
Setting debug to True will enable debugging logs.
"""
if len(sys.argv) == 2 and sys.argv[1] == 'DEBUG':
debug = True
app = QApplication(sys.argv)
screen_size = QApplication.desktop().screenGeometry()
ex = OHappenWindow(screen_size, debug)
sys.exit(app.exec_())
if __name__ == "__main__":
start()
| UTF-8 | Python | false | false | 5,659 | py | 12 | ohappening.py | 11 | 0.651159 | 0.642137 | 0 | 158 | 34.778481 | 101 |
samarthdave/nagini | 15,238,544,001,996 | 26bad96ff5fb32885b238c0f734e67c04b42088a | aab0245630a8cc69432ab0d588544c4c40f199f5 | /using-python-for-automation/scrape/club.py | e735f3a5881de0b039731f276dd4ff1c48f4aca2 | [] | no_license | https://github.com/samarthdave/nagini | 806134f40cc5ef3392636764c40b3b1fe439e005 | cd61125843756832b890eb98eccc3fd03aff3218 | refs/heads/master | 2022-12-21T20:44:46.621229 | 2021-12-30T20:40:56 | 2021-12-30T20:40:56 | 248,650,956 | 0 | 0 | null | false | 2022-12-12T20:03:06 | 2020-03-20T02:30:53 | 2021-12-30T20:41:05 | 2022-12-12T20:03:06 | 976 | 0 | 0 | 12 | Jupyter Notebook | false | false | # use ScrapingClub.com > Exercise 3
# imports
import requests
from bs4 import BeautifulSoup
base_url = 'https://scrapingclub.com/exercise/list_basic/'
resp = requests.get(base_url)
soup = BeautifulSoup(resp.text, 'lxml')
# find all cards
count = 0
items = soup.find_all('div', class_='col-lg-4 col-md-6 mb-4')
# text properties: name - card-title, price - h5 tag
for block in items:
count += 1
itemName = block.find('h4', class_='card-title').text.strip('\n')
itemPrice = block.find('h5').text
print('{0}: {1:<30} - {2}'.format(count, itemName, itemPrice))
# PAGINATION
# hold all urls (prev, 1, 2, ... 5, next)
page_refs = soup.find('ul', class_='pagination')
# append to these arrays with .../...?page=N
urls = []
full_urls = []
# get all anchor tags
links = page_refs.find_all('a', class_='page-link')
for link in links:
pageNum = int(link.text) if link.text.isdigit() else None
# if not None (since None is "falsey")
if pageNum:
x = link.get('href')
urls.append(x)
full_urls.append(base_url + x)
# print all hrefs
print('='*20)
for i in full_urls:
print(i)
print('='*20)
# ask user if should paginate through all URLs
prompt_resp = input("Explore the above URLs found in pagination (yes/no)? ")
should_explore = prompt_resp.lower() == 'yes'
# convert all to real urls from base url
for url in full_urls:
# if shouldn't make request to all paths then break
if not should_explore:
break
resp = requests.get(url)
soup = BeautifulSoup(resp.text, 'lxml')
# find all cards
items = soup.find_all('div', class_='col-lg-4 col-md-6 mb-4')
# text properties: name - card-title, price - h5 tag
for block in items:
count += 1
itemName = block.find('h4', class_='card-title').text.strip('\n')
itemPrice = block.find('h5').text
print('{0}: {1:<30} - {2}'.format(count, itemName, itemPrice)) | UTF-8 | Python | false | false | 1,917 | py | 70 | club.py | 44 | 0.637976 | 0.62024 | 0 | 62 | 29.935484 | 76 |
carlgval/python-challenges | 4,131,758,582,345 | cff275ce805925e24637ce0960bd8796963c610d | 50ec6753b6d96fafc6793178819cff9d759c64aa | /hanoi_tower_moves.py | b90a55965c8bf3945c093ef52792291e06f04019 | [] | no_license | https://github.com/carlgval/python-challenges | 5a292273ea471b5ca217fcae687f64190e34c650 | 26d1088c519dab980e7ed96e01d7250d0d2111bf | refs/heads/master | 2020-03-29T19:01:54.664708 | 2019-03-15T13:12:11 | 2019-03-15T13:12:11 | 150,244,076 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 10 08:21:13 2019
The Tower of Hanoi is a puzzle game with three rods and n disks, each a
different size.
All the disks start off on the first rod in a stack. They are ordered by size,
with the largest disk on the bottom and the smallest one at the top.
The goal of this puzzle is to move all the disks from the first rod to the
last rod while following these rules:
You can only move one disk at a time.
A move consists of taking the uppermost disk from one of the stacks and
placing it on top of another stack.
You cannot place a larger disk on top of a smaller disk.
Write a function that prints out all the steps necessary to complete the
Tower of Hanoi. You should assume that the rods are numbered, with the first
rod being 1, the second (auxiliary) rod being 2, and the last (goal) rod
being 3.
For example, with n = 3, we can do this in 7 moves:
Move 1 to 3
Move 1 to 2
Move 3 to 2
Move 1 to 3
Move 2 to 1
Move 2 to 3
Move 1 to 3
@author: carlgval
"""
def moves_hanoi_tower(levels):
if levels > 1:
moves = 1 + moves_hanoi_tower(levels - 1) * 2
else:
moves = 1
return moves
if __name__ == '__main__':
print(moves_hanoi_tower(3))
print(moves_hanoi_tower(5))
print(moves_hanoi_tower(7))
| UTF-8 | Python | false | false | 1,332 | py | 121 | hanoi_tower_moves.py | 120 | 0.692943 | 0.662162 | 0 | 51 | 25.117647 | 78 |
mutaku/Stumpy | 7,301,444,422,093 | 34a7c718ae58fada3d378ddcc53242a37e3ebc59 | 0b1c671105470f218b4381536e9d9ed42559af92 | /stumpy.fcgi | ded8ffa445d020b18c02aae0f2e412cd8eb5ed7b | [
"BSD-3-Clause"
] | permissive | https://github.com/mutaku/Stumpy | ccc8c9944955e2585570c289e172dd3d5c5ab8dd | 18c54a73bf612f41eb45f97074d7427b897314c8 | refs/heads/master | 2021-01-16T18:28:00.272688 | 2012-08-30T14:37:49 | 2012-08-30T14:37:49 | 1,561,100 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import sys, os
sys.path.insert(0, "..")
os.environ['PYTHON_EGG_CACHE'] = "/tmp/"
os.environ['DJANGO_SETTINGS_MODULE'] = "settings"
from django.core.servers.fastcgi import runfastcgi
runfastcgi(["method=threaded", "daemonize=false", "debug=1"])
| UTF-8 | Python | false | false | 273 | fcgi | 20 | stumpy.fcgi | 10 | 0.699634 | 0.692308 | 0 | 11 | 23.545455 | 61 |
chaman21/sparking | 10,514,079,964,456 | 29af3d61cb9e6ede1a675787af702a0f007ffb7d | bb525d0eac7f7722e864066671f45dfbd5eb9e05 | /banking/banking/migrations/0006_auto_20210911_1515.py | 206a4e5f66046cb6142c28abbd68d457df68698d | [] | no_license | https://github.com/chaman21/sparking | 77b9768c58046ccd3e9dd16a9b022648f255d7a5 | f4f742b52888f9330220b35099b7aa2ce7f70c8c | refs/heads/master | 2023-07-31T18:56:49.507135 | 2021-09-12T16:38:44 | 2021-09-12T16:38:44 | 405,690,639 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.6 on 2021-09-11 09:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('banking', '0005_auto_20210911_1507'),
]
operations = [
migrations.RemoveField(
model_name='trans',
name='email',
),
migrations.RemoveField(
model_name='trans',
name='mobile',
),
migrations.RemoveField(
model_name='trans',
name='name',
),
]
| UTF-8 | Python | false | false | 528 | py | 19 | 0006_auto_20210911_1515.py | 13 | 0.522727 | 0.464015 | 0 | 25 | 20.12 | 47 |
IUIUN/The-Best-Time-Is-Now | 1,717,986,951,932 | c751e368f8c8fe96376b68f5e2bbf7c41c934bd9 | fa9bae32c203323dfb345d9a415d4eaecb27a931 | /938. Range Sum of BST.py | 433c73667ea0f5e2d3115bcf55ea0855d3eee222 | [] | no_license | https://github.com/IUIUN/The-Best-Time-Is-Now | 48a0c2e9d449aa2f4b6e565868a227b6d555bf29 | fab660f98bd36715d1ee613c4de5c7fd2b69369e | refs/heads/master | 2020-09-14T12:06:24.074973 | 2020-02-15T06:55:08 | 2020-02-15T06:55:08 | 223,123,743 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Solution:
def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
self.res = 0
self.dfs(root, L, R, self.res)
return self.res
def dfs(self, root, L, R, res):
if not root:
return
if L <= root.val <= R:
self.res += root.val
if root.val < R:
self.dfs(root.right, L, R, self.res)
if root.val > L:
self.dfs(root.left, L, R, self.res) | UTF-8 | Python | false | false | 458 | py | 103 | 938. Range Sum of BST.py | 103 | 0.4869 | 0.484716 | 0 | 15 | 29.533333 | 65 |
Gaurangsharma/guiv | 18,210,661,351,740 | 53d8419dfa784b562f2f2f908591ffc6b70570f9 | ad04e9358184eee37931bb7fecc010292152d3d8 | /codechef/goal.py | e4c4678a2be391ec058d4ea415a2ff03d9ece873 | [] | no_license | https://github.com/Gaurangsharma/guiv | 70421e50fa24ee1174600d7a965f1a83b8f21f18 | acead57d2e3ac84632adceafe86f57a9828ca9bd | refs/heads/master | 2021-06-28T19:47:12.656147 | 2020-09-15T17:45:27 | 2020-09-15T17:45:27 | 152,371,136 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | for _ in range(int(input())):
n=int(input()) #n initially
goals=list(input())
win_point=(n//2)+1
count=0
k=False
team_A,team_B=0,0
for i in range(len(goals)):
if i%2==0:
if goals[i]=='1':
team_A+=1
else:
if goals[i]=='1':
team_B+=1
if team_A==team_B:
if team_A>=win_point:
print(i+2)
k=True
break
if team_B>=win_point:
print(i+1)
k=True
break
if k==False:
print(len(goals)) #n ended | UTF-8 | Python | false | false | 625 | py | 168 | goal.py | 165 | 0.3952 | 0.3744 | 0 | 25 | 24.04 | 36 |
mattseddon/billy_cart | 10,617,159,189,152 | fbf43c30f087cfdc7be0cea2c189085996ccea77 | 6cd7a0aae71903c7d2dc147662bb3e0d9b482320 | /app/market/model/handler.py | 2f54ed136f095b6fe0855efc2183a801ec99dcb6 | [] | no_license | https://github.com/mattseddon/billy_cart | d58d863037d0c9f8cfd49112ffb55a441f818fa6 | ab3272dc3c184bb4f7149c155c998f5e48dc54d6 | refs/heads/master | 2022-01-10T21:40:16.068032 | 2022-01-02T04:01:37 | 2022-01-02T04:01:37 | 209,914,286 | 0 | 0 | null | false | 2021-06-02T02:22:50 | 2019-09-21T02:44:38 | 2021-04-06T20:48:07 | 2021-06-02T02:22:50 | 448 | 0 | 0 | 0 | Python | false | false | from app.colleague import Colleague
from infrastructure.third_party.adapter.numpy_utils import calculate_log, not_a_number
from infrastructure.built_in.adapter.copy_utils import make_copy
class ModelHandler(Colleague):
def __init__(self, mediator, wls_model, event_country="AU"):
self.__event_country = event_country
self.wls_model = wls_model
Colleague.__init__(self, mediator=mediator)
self.__market_back_size = None
def run_models(self, items):
self.__market_back_size = self.__get_market_back_size(items=items)
results = []
for item in items:
if (
self._meets_wlr_criteria(item=item)
and self._meets_wlr_threshold(item=item)
and self._has_overlay(
item=item, probability="compositional_sp_probability_pit"
)
):
has_value = self.__standardise_result(
item=item,
probability="compositional_sp_probability_pit",
order_type="BUY",
model_id="SPMB",
ex_price="ex_offered_back_price_pit",
returns_price="ex_offered_back_price_mc_pit",
)
results.append(has_value)
continue
elif (
self._meets_high_back_size_threshold(item=item)
and self.__event_country == "AU"
and self._has_overlay(
item=item, probability="compositional_ex_average_probability_pit"
)
):
has_value = self.__standardise_result(
item=item,
probability="compositional_ex_average_probability_pit",
order_type="BUY",
model_id="MBG2",
ex_price="ex_offered_back_price_pit",
returns_price="ex_offered_back_price_mc_pit",
)
results.append(has_value)
continue
elif (
self._meets_low_back_size_threshold(item=item)
and self.__event_country == "AU"
and self._has_overlay(
item=item, probability="compositional_ex_average_probability_pit"
)
):
has_value = self.__standardise_result(
item=item,
probability="compositional_ex_average_probability_pit",
order_type="BUY",
model_id="MBL2",
ex_price="ex_offered_back_price_pit",
returns_price="ex_offered_back_price_mc_pit",
)
results.append(has_value)
continue
return (
self._mediator.notify(event="models have results", data=results)
if results
else self._mediator.notify(event="finished processing", data=None)
)
def __standardise_result(
self, item, probability, order_type, model_id, ex_price, returns_price
):
has_value = {}
has_value["id"] = item.get("id")
has_value["probability"] = item.get(probability)
has_value["type"] = order_type
has_value["model_id"] = model_id
has_value["ex_price"] = item.get(ex_price)
has_value["returns_price"] = item.get(returns_price)
return has_value
def _has_overlay(self, item, probability):
return item.get(probability) > (1 / item.get("ex_offered_back_price_mc_pit"))
def _meets_wlr_criteria(self, item):
y = self._get_log_returns(y=item.get("compositional_sp_back_price_ts"))
self.wls_model.run(
y=y,
x=item.get("extract_time_ts"),
weights=item.get("combined_back_size_ts"),
)
alpha = self.wls_model.get_alpha()
Beta = self.wls_model.get_Beta()
return Beta < 0 and alpha < -0.00001
def _meets_wlr_threshold(self, item):
back_size = item.get("combined_back_size_pit")
return (
back_size >= 5000 and self.__event_country != "GB"
) or back_size >= 30000
def _meets_high_back_size_threshold(self, item):
back_size = item.get("combined_back_size_pit")
return (
item.get("ex_offered_back_price_pit") > 2
and (back_size / self.__market_back_size) >= 0.6
and back_size >= 20000
)
def _meets_low_back_size_threshold(self, item):
back_size = item.get("combined_back_size_pit")
offered_back_price = item.get("ex_offered_back_price_pit")
return (
offered_back_price <= 2
and (back_size / self.__market_back_size)
>= max([0.6, (1 / offered_back_price)])
and back_size >= 10000
)
def _get_log_returns(self, y):
data = make_copy(y)
shifted_list = make_copy(data)
shifted_list.pop()
shifted_list.insert(0, not_a_number())
return [
calculate_log(point_in_time / previous_point_in_time)
for point_in_time, previous_point_in_time in zip(data, shifted_list)
]
def __get_market_back_size(self, items):
market_back_size = 0
for item in items:
market_back_size += item.get("combined_back_size_pit")
return market_back_size
| UTF-8 | Python | false | false | 5,435 | py | 82 | handler.py | 75 | 0.531371 | 0.524379 | 0 | 148 | 35.722973 | 86 |
WillahScott/adventofcode | 11,982,958,792,593 | ca137c4e715fcfc29b95e0369d74e4d43f6380d7 | e109c76e1d244dd6f1c58bb585699f40b649ed7f | /day11.py | 22ab48bbdad87878b555a3fcb54b0668db7e4e58 | [] | no_license | https://github.com/WillahScott/adventofcode | 20d386998b38e8da184552ee41ae861e19a8e358 | 8a6c5c9b5cbe04191a59fda419a4dfc3d997d8c1 | refs/heads/master | 2021-05-06T08:41:26.788335 | 2017-12-30T14:59:11 | 2017-12-30T14:59:11 | 114,059,155 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Advent of code - DAY 11
from itertools import groupby
# Read data
with open('data/d11.txt') as f:
new_data = f.read().split(',')
# -- Problem 1 function --
def get_path_dict(steps):
gpb = groupby(sorted(steps))
return { i: len(list(v)) for i, v in gpb }
OPP = [('n', 's'), ('ne', 'sw'), ('nw', 'se')]
DIAG = { 'n': {'sw': 'nw', 'se': 'ne'},
'nw': {'s': 'sw', 'ne': 'n'},
'ne': {'s': 'se', 'nw': 'n'},
's': {'nw': 'sw', 'ne': 'se'},
'sw': {'n': 'nw', 'se': 's'},
'se': {'n': 'ne', 'sw': 's'},
}
def reduce_path(long_path_d, verbose=False):
int_path = []
# reduce opposites - sort by max to min
for i, j in OPP:
_steps = long_path_d[i] - long_path_d[j]
int_path.append([i if _steps > 0 else j, abs(_steps)])
int_path.sort(key=lambda x: x[1], reverse=True)
if verbose:
print('No opps:', int_path)
# reduce diagonals
red_path = {}
while int_path:
k_red, v_red = int_path.pop(0) # to reduce
pos_reds = DIAG[k_red] # possible reductions
for k, v in int_path:
if k in pos_reds:
# reduce and remove the other with which we reduced
red_path[pos_reds[k]] = red_path.get(pos_reds[k],0) + v
int_path.remove([k,v])
left = v_red - v
if left > 0: # if there is still left
int_path.append( [k_red, left] )
int_path.sort(key=lambda x: x[1], reverse=True)
break
else:
# the item is irreducible
red_path[k_red] = red_path.get(k_red,0) + v_red
if verbose:
print('PATH:', red_path )
return sum(red_path.values())
# Test
# > ?
# Run problem 1
reduce_path( get_path_dict(new_data), verbose=True )
# > 707 ## PATH: {'n': 317, 'nw': 390}
# -- Problem 2 function --
PAST_PATH = []
max_steps = 0
for d in new_data:
PAST_PATH.append(d)
max_steps = max( max_steps, reduce_path(get_path_dict(PAST_PATH)) )
# Run problem 2
print(max_steps)
# > 1490
| UTF-8 | Python | false | false | 2,104 | py | 23 | day11.py | 22 | 0.499525 | 0.485741 | 0 | 85 | 23.741176 | 72 |
shwang0416/Jungle_week03 | 1,090,921,699,946 | f30b6497d0fbbe50e22725fc90a144dbb673d518 | 3d02bab6338a4984f5fba18165e60820b2dc0a47 | /07_1987_알파벳.py | a28f35df197343c157c51a221de1939f914629ed | [] | no_license | https://github.com/shwang0416/Jungle_week03 | 15fdceb2929d238c5154e5bcef9d46c5d4f5bda6 | 25be00f7db6c679ae07f014021f45c1394f42fd2 | refs/heads/master | 2023-02-03T15:54:14.896061 | 2020-12-28T13:57:47 | 2020-12-28T13:57:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # [백준] https://www.acmicpc.net/problem/1987 알파벳
# [참고] https://it-garden.tistory.com/272
# DFS, 백트래킹
import sys
sys.stdin = open('test/07.txt')
sys.setrecursionlimit(100000)
# input
Y, X = map(int, sys.stdin.readline().split())
alpha = [list(map(lambda x : ord(x)-65, sys.stdin.readline()))for i in range(Y)]
# 방문한 적이 없어야하고, 갖고 있지 않은 알파벳이어야 한다.
visited = [0 for _ in range(26)]
ans = 1
dy = [-1,0,1,0]
dx = [0,-1,0,1]
def find_len(y,x,cnt):
visited[alpha[y][x]] = 1
global ans
ans = max(ans, cnt)
# for dx,dy in [[-1,0],[1,0],[0,-1],[0,1]]:
# nx = dx+x
# ny = dy+y
# 이거 대신에 dx dy 배열로 바꿨더니 시간초과가 안나고 통과됨!!
for i in range(4):
ny = y + dy[i]; nx = x + dx[i]
if 0 <= nx < X and 0 <= ny < Y:
if visited[alpha[ny][nx]] == 0:
find_len(ny,nx,cnt+1)
visited[alpha[ny][nx]] = 0
find_len(0,0,ans)
sys.stdout.write(str(ans))
| UTF-8 | Python | false | false | 1,042 | py | 22 | 07_1987_알파벳.py | 17 | 0.536717 | 0.487041 | 0 | 35 | 25.342857 | 80 |
neoareslinux/python | 14,130,442,451,842 | 6106c155fade54e6a3671f2e776af6bedd9dff08 | f4c13cc816445c2d3d454e67f09430c969c61813 | /ser-net/backpexpect.py | dda11931aec167aeeca95a63d10751972e85c8af | [] | no_license | https://github.com/neoareslinux/python | c669818aa185808777126465aa04c221e96cb04d | 383a77639c33e6135d22c6c1eecc3756eef3918c | refs/heads/master | 2015-08-13T05:29:25.749984 | 2014-10-29T13:30:18 | 2014-10-29T13:30:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
#coding=utf-8
import pexpect
import sys
import re
import tempfile
import getpass
username = 'test'
password = 'test'
new = pexpect.spawn('telnet 1.1.1.2',timeout=3)
index = new.expect(['login: ','Username',pexpect.TIMEOUT,"(?i)Unknown host"])
#this if could identify the switches of juniper
if index == 0:
new.sendline(username)
new.expect('Password:')
new.sendline(password)
new.expect(['% Login failed!','Login incorrect'])
elif index == 1:
new.sendline(username)
new.expect('Password:')
new.sendline(password)
i = new.expect(['% Login failed!','>','The initial password poses'])
if i == 0:
print 'wrong password'
new.close()
elif i == 1:
sysname_info = new.before.strip()
sysname = list(sysname_info)
sysname = sysname[1:]
switch_name = ''.join(sysname)
print switch_name
new.sendline('n')
index2 = new.expect([' % Incomplete command','nn'])
if index2 == 0:
print 'device is h3c swich'
new.sendline('dis interface Bridge-Aggregation')
new.sendline(' '*10)
new.sendline('dis link-aggregation verbose')
new.sendline(' '*10)
new.sendline('quit')
print new.before
elif i == 2:
new.sendline('n')
new.sendline(' ')
print 'device is huawei switch'
new.sendline('quit')
print new.before
elif index == 2:
new.close()
print new
print 'time out'
elif index == 3:
print 'unkonw host'
###############this piece of code is to identify h3c,huawei and others
############################################################
new.expect(pexpect.EOF)
#create temporary file
tfile = tempfile.TemporaryFile()
tfile.write(new.before)
tfile.seek(0)
number = 0
mydict = {}
for line in tfile:
m1 = re.search('current state',line)
if m1:
list1 = re.split(r'\s+',line)
a = re.findall(r'Bridge-Agg.*',line)[0]
a = re.split(r'\s+',a)
print a
x= tfile.next()
x= tfile.next()
x= tfile.next()
m2 = re.search('speed mode',x)
if m2:
list2 = re.split(r'\s+',x)
mydict[list1[1]] = {}
mydict[list1[1]]['state'] = a[3]
mydict[list1[1]]['speed'] = list2[1]
m2 = re.search('Aggregation Interface',line)
if m2:
list3 = re.split(r'\s+',line)
for subline in tfile:
m3 =re.search('Oper-Key',subline)
if m3:
mydict[list3[2]]['interface'] = {}
x = tfile.next()
while True:
x = tfile.next()
m3 = re.search('GE',x)
if m3:
list4 = re.split(r'\s+',x)
mydict[list3[2]]['interface'][list4[1]] = list4[2]
# mydict[list[3]]['interface']
else:
break
break
l = mydict.keys()
for i in l:
if mydict[i]['interface'] == {}:
mydict[i]['info'] = '该聚合组没有成员端口'
print mydict
| UTF-8 | Python | false | false | 2,681 | py | 31 | backpexpect.py | 31 | 0.607666 | 0.58587 | 0 | 131 | 19.312977 | 77 |
KEYuni/UAS-Pemrosesan-Teks | 6,708,738,925,961 | 60a3569fb2a96e8d008c25d01f69137157f35232 | 340e965d3fc9897104a33d09173d5e106017a6dc | /uas.py | 97c583def2a44c06ede8251f66a72c16166f4e00 | [] | no_license | https://github.com/KEYuni/UAS-Pemrosesan-Teks | bcadda8b9b07b4d00b3a41cfcd227af5d1c61ac2 | 93370369d9ed0e46a436345fd4fef86a16cf8a1a | refs/heads/master | 2020-03-19T05:17:51.184878 | 2018-06-09T03:40:51 | 2018-06-09T03:40:51 | 135,917,971 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Program Name : Autogenerate Keyword
#Author : Yuniarti Musa'adah
#Email : yuniartimusaadah@gmail.com
#================================================================================================
# LIBRARY YANG DIGUNAKAN DALAM PROGRAM
#================================================================================================
#import library yang dibutuhkan dalam program
import numpy as np
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from nltk.corpus import stopwords
from stemming.porter2 import stem
#________________________________________________________________________________________________
#================================================================================================
# PROSEDUR DAN FUNGSI DALAM PROGRAM
#================================================================================================
# ==== 1. fungsi menghapus stopword
def removeStopWords(sentence):
dQ = vectorizer.fit_transform({sentence})
dN = vectorizer.get_feature_names()
dN = [w for w in dN if not w in stop_words]
senRemoved = ' '.join(w for w in dN)
return senRemoved
# ==== 2. prosedur sorting dari yang paling besar ke yang paling kecil
def sorting(hasilAkhir, database, databaseStem):
#sorting database dari yang paling mendekati query
tukar = 1
while(tukar == 1):
tukar = 0
for i in range(0, len(database)-1):
#jika nilai TFIDF ke i lebih kecil dari nilai TFIDF selanjutnya
if(hasilAkhir[i] < hasilAkhir[i+1]):
#menukar posisi nilai TFIDF ke i dengan nilai TFIDF ke i+1
temp = hasilAkhir[i]
hasilAkhir[i] = hasilAkhir[i+1]
hasilAkhir[i+1] = temp
#menukar posisi ke i dengan i+1 pada database yang sudah di stem
tempTerm1 = databaseStem[i]
databaseStem[i] = databaseStem[i+1]
databaseStem[i+1] = tempTerm1
#menukar posisi ke i dengan ke i+1 pada database awal
tempTerm = database[i]
database[i] = database[i+1]
database[i+1] = tempTerm
tukar = 1
#________________________________________________________________________________________________
#================================================================================================
# ALUR AUTO-GENERATE KEYWORDS
#================================================================================================
# deklarasi vectorizer
vectorizer = CountVectorizer()
# 1. MEMBACA FILE ===============================================================================
# Membaca file memasukannya ke dalam list "database"
# -- Memasukan data Training ke database
database = open("coba.501").read().split('.')
# -- Menghilangkan kalimat yang kosong / tidak mengandung kata apapun
for i in range(0, len(database)):
if not database[i]:
database.pop(i)
# 2. STOPWORD REMOVAL ===========================================================================
# deklarasi stopword untuk B.Inggris
stop_words = set(stopwords.words('english'))
for i in range(0, len(database)):
database[i] = removeStopWords(database[i])
# 3. STEMMING DATABASE ==========================================================================
# Men-Stemming setiap kata dari kalimat-kalimat di database
databaseStem = database
databaseStem = [' '.join(stem(word) for word in sentence.split(" ")) for sentence in databaseStem]
dbStem = [[stem(word) for word in sentence.split(" ")] for sentence in databaseStem]
#for i in range(0, len(database)):
# print(i+1, database[i])
# 5. MENENTUKAN NILAI TF-IDF DARI SETIAP KALIMAT DALAM DOKUMEN ==================================
termNoun = [] # variable yang menampung term-term yang berupa noun dari kalimat
newSentence = [] # untuk menampung gabungan term-term noun menjadi 1 kalimat
# pos tagging setiap kata menggunakan nltk pos_tag
for i in range(0, len(database)):
# tokenizing kalimat
tokens = nltk.word_tokenize(database[i])
# melakukan pos tagging pada setiap kata dalam kalimat
tagged = nltk.pos_tag(tokens)
if i == 1 :
print(tokens)
print(tagged)
for j in range (0, len(tagged)):
# mencari kata-kata yang berupa Noun (NN, NNP, NNPS, NNS) dilihat dari huruf awal
log = (tagged[j][1][0] == 'N')
# jika noun, maka katanya di stem dan dimasukan ke variable termNoun
if log == True:
hStem = stem(tagged[j][0])
termNoun.append(hStem)
# kata-kata noun yang telah didapatkan digabungkan menjadi 1 kalimat
newSentence.append(' '.join(w for w in termNoun))
# mengosongkan kembali variable termNoun
del termNoun[:]
# 4. MENENTUKAN FEATURE-FEATURE DARI DATABASE ===================================================
X = vectorizer.fit_transform(databaseStem)
# variable yang menampung semua nama feature dari database
db_FeatureName = vectorizer.get_feature_names()
# deklarasi tfidf transformer
transformer = TfidfTransformer(smooth_idf=False)
# menghitung nilai TFIDF dari database yang hanya berisi noun ===================================
# mentransform kalimat yang hanya berisi noun
kalimatNoun = vectorizer.transform(newSentence)
# variable untuk menampung nilai tfidf, nilai minimal 0.5
arrayNoun = np.full(((len(database)),len(db_FeatureName)), 0.5)
# memasukan nilai TFIDF
for i in range(0, len(databaseStem)):
for j in range(0, len(db_FeatureName)):
if (kalimatNoun.toarray()[i][j] is not 0) :
arrayNoun[i][j] = arrayNoun[i][j] + kalimatNoun.toarray()[i][j]
tfidf_Noun = transformer.fit_transform(arrayNoun)
naNoun = []
for i in range(0, len(databaseStem)) :
naNoun.append(0)
for j in range(0, len(db_FeatureName)):
naNoun[i] = naNoun[i] + tfidf_Noun.toarray()[i][j]
# menghitung nilai TFIDF dari database yang lengkap =============================================
kalimatFull = vectorizer.transform(databaseStem)
arrayFull = np.full(((len(database)),len(db_FeatureName)), 0.5)
for i in range(0, len(databaseStem)):
for j in range(0, len(db_FeatureName)):
if (kalimatFull.toarray()[i][j] is not 0) :
arrayFull[i][j] = arrayFull[i][j] + kalimatFull.toarray()[i][j]
tfidf_Full = transformer.fit_transform(arrayFull)
naFull = []
for i in range(0, len(databaseStem)) :
naFull.append(0)
for j in range(0, len(db_FeatureName)):
naFull[i] = naFull[i] + tfidf_Full.toarray()[i][j]
# compute the final TF-IDF score of every sentence in the document
# (sum of the noun-word TF-IDF divided by the sum of the full-sentence TF-IDF)
hasilAkhir = []
for i in range(0, len(databaseStem)):
    hasilAkhir.append(naNoun[i] / naFull[i])
# sort the sentences from the highest TF-IDF score down to the lowest
sorting(hasilAkhir, database, databaseStem)
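# (`sorting` is assumed to be a helper defined earlier in this script that
# sorts all three lists in parallel, descending by the score list.)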
# copy the top-3 sentences
kalimatTop = database[0:3]
# stem the top-3 sentences
topStem = [[stem(word) for word in sentence.split(" ")] for sentence in kalimatTop]
# join the stemmed sentences back into single strings (databaseStem is already
# sorted, so its first three entries correspond to the top-3 sentences)
kalimatTopStem = [' '.join(stem(word) for word in sentence.split(" ")) for sentence in databaseStem]
gabungTop = [[word for word in sentence.split(" ")] for sentence in kalimatTop]
#print("gabungTop : ", gabungTop)
#print("=====")
#print(topStem)
abcd = kalimatTopStem[0:3]
a10 = ' '.join(word for word in abcd)
Y = vectorizer.fit_transform({a10})
b10 = vectorizer.get_feature_names()
kandidatKey = []
nilaiKandidat = []
asliWord = []
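# score every noun term of the top sentences: the corpus frequency of the stem
# times the TF-IDF ratio of the sentence it came from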
for one in range(0, len(topStem)):
    for i in range(0, len(topStem[one])):
        jumlah = 0
        for j in range(0, len(dbStem)):
            for k in range(0, len(dbStem[j])):
                if dbStem[j][k] == topStem[one][i]:
                    jumlah = jumlah + 1
        kandidatKey.append(topStem[one][i])
        nilaiKandidat.append(jumlah * hasilAkhir[one])
        asliWord.append(gabungTop[one][i])
sorting(nilaiKandidat, kandidatKey, asliWord)
# drop duplicate stemmed candidates, keeping the first (highest-scoring) occurrence
seen = set()
uniqueKey, uniqueNilai, uniqueAsli = [], [], []
for key, nilai, asli in zip(kandidatKey, nilaiKandidat, asliWord):
    if key not in seen:
        seen.add(key)
        uniqueKey.append(key)
        uniqueNilai.append(nilai)
        uniqueAsli.append(asli)
kandidatKey, nilaiKandidat, asliWord = uniqueKey, uniqueNilai, uniqueAsli
#print(kandidatKey)
#print(nilaiKandidat)
# print the top-5 keyword candidates in their original (unstemmed) form
print(", ".join(asliWord[0:5]))
#print(tfidf.toarray())
#
#for i in range(0, len(hasilStemming)):
# print("Stem : ", i+1, " : ", hasilStemming[i])
| UTF-8 | Python | false | false | 8,775 | py | 3 | uas.py | 1 | 0.561254 | 0.550427 | 0 | 253 | 33.652174 | 100 |
nukui-s/mlens | 10,471,130,270,689 | ceb33a76a9caf6cdec83f096bf39ea74a04b5892 | 141166565426af8233782bd8de6f3c4a5d227cea | /mlens/parallel/tests/test_a_learner_full.py | 4762b3cc7c1ce6e9df8c4db7a5661291a17eddf4 | [
"MIT"
] | permissive | https://github.com/nukui-s/mlens | 4fa1ee513494120c0a3e34a659d6ce701989f221 | 91d89c8daff6da508d5d5577349fba051b1c8eb9 | refs/heads/master | 2020-03-19T06:08:52.188474 | 2018-06-04T10:39:38 | 2018-06-04T10:39:38 | 135,994,418 | 0 | 0 | MIT | true | 2018-06-04T10:39:39 | 2018-06-04T08:28:45 | 2018-06-04T08:28:48 | 2018-06-04T10:39:38 | 10,368 | 0 | 0 | 0 | Python | false | null | """ML-ENSEMBLE
Testing suite for Learner and Transformer
"""
from mlens.testing import Data, EstimatorContainer, get_learner, run_learner
def test_predict():
"""[Parallel | Learner | Full | No Proba | No Prep] test fit and predict"""
args = get_learner('predict', 'full', False, False)
run_learner(*args)
def test_transform():
"""[Parallel | Learner | Full | No Proba | No Prep] test fit and transform"""
args = get_learner('transform', 'full', False, False)
run_learner(*args)
def test_predict_prep():
"""[Parallel | Learner | Full | No Proba | Prep] test fit and predict"""
args = get_learner('predict', 'full', False, True)
run_learner(*args)
def test_transform_prep():
"""[Parallel | Learner | Full | No Proba | Prep] test fit and transform"""
args = get_learner('transform', 'full', False, True)
run_learner(*args)
def test_predict_proba():
"""[Parallel | Learner | Full | Proba | No Prep] test fit and predict"""
args = get_learner('predict', 'full', True, False)
run_learner(*args)
def test_transform_proba():
"""[Parallel | Learner | Full | Proba | No Prep] test fit and transform"""
args = get_learner('transform', 'full', True, False)
run_learner(*args)
def test_predict_prep_proba():
"""[Parallel | Learner | Full | Proba | No Prep] test predict"""
args = get_learner('predict', 'full', True, True)
run_learner(*args)
def test_transform_prep_proba():
"""[Parallel | Learner | Full | Proba | Prep] test transform"""
args = get_learner('transform', 'full', True, True)
run_learner(*args)
| UTF-8 | Python | false | false | 1,611 | py | 33 | test_a_learner_full.py | 32 | 0.640596 | 0.640596 | 0 | 53 | 29.396226 | 81 |
suneric/aircraft_scanning | 6,030,134,110,848 | 22b5fb733b19e8f9655f82936707c8377ee0cc46 | 5f535b35375d68f407ee2f1153b97b686c9a8365 | /aircraft_scanning_control/scripts/ugv_trajectory.py | 38027ad4bbcba363f8bf6dabc988f648c8a4e4a0 | [
"MIT"
] | permissive | https://github.com/suneric/aircraft_scanning | d32a0ba3e44a0954a1a6a4a283615ca142a4cee8 | 18c7deb8405eabecab643e7ebbda5f3a61e78393 | refs/heads/master | 2022-06-04T11:17:27.210208 | 2022-05-18T22:49:32 | 2022-05-18T22:49:32 | 239,586,464 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import rospy
import numpy as np
import os
import sys
from geometry_msgs.msg import Pose
from math import *
import transform
from transform import MMRobotTransform
class trajectory_defined:
def __init__(self):
self.trajectory = []
self.index = 0
self.transform_util = MMRobotTransform()
# self._create_trajectory()
        file = os.path.join(sys.path[0],'../../aircraft_scanning_plan/trajectory/ugv/viewpoints.txt')
self._load_trajectory(file)
def completed(self):
return self.index >= len(self.trajectory)
def next_pose(self):
if self.completed():
print("done.")
return None
else:
pose = self.trajectory[self.index]
self.index = self.index + 1
print("traverse to ", pose)
return pose
def _load_trajectory(self,file):
print("load trajectory from", file)
with open(file,'r') as reader:
for line in reader.read().splitlines():
data = line.split(" ")
idx = int(data[0])
px = float(data[1])
py = float(data[2])
pz = float(data[3])
ox = float(data[4])
oy = float(data[5])
oz = float(data[6])
ow = float(data[7])
                pose = Pose()
pose.position.x = px
pose.position.y = py
pose.position.z = pz
pose.orientation.x = ox
pose.orientation.y = oy
pose.orientation.z = oz
pose.orientation.w = ow
ugv, arm = self.transform_util.camera2mobilebase(pose)
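                # camera2mobilebase (from the transform module) is assumed to
                # split a camera viewpoint pose into a UGV base pose and an
                # arm configuration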
self.trajectory.append([ugv,arm])
def _create_trajectory(self):
arm_joint = [0.0,1.57,0.0,-0.3,0.0,-1.87,0.0]
self.trajectory.append([(0,-28,0.5*pi),arm_joint])
self._create_updownpath([-27,-22.5],[1.0,-1.0],1.0,arm_joint)
# self._create_updownpath([-20,-8],[1.0,-1.0])
# self._create_updownpath([-8,8],[1.5,0,-1.5])
# self._create_updownpath([8,25],[1.0,-1.0])
# self.trajectory.append((0,26,0.5*pi))
# self.trajectory.append((0,27,0.5*pi))
def _create_updownpath(self, y_range, x_pos, offset, arm_joint):
i = 0
y = y_range[0]
while y < y_range[1]:
if np.mod(i,2) == 0:
for x in x_pos:
self.trajectory.append([(x,y,0.5*pi),arm_joint])
else:
for x in reversed(x_pos):
self.trajectory.append([(x,y,0.5*pi),arm_joint])
i += 1
y += offset
| UTF-8 | Python | false | false | 2,725 | py | 150 | ugv_trajectory.py | 36 | 0.509358 | 0.478165 | 0 | 80 | 33.0625 | 102 |
jianjunyue/python-learn-ml | 12,773,232,777,123 | 19ccc7360a84ef900ce2a5852083c6ed8127400f | eccbb87eefe632a1aa4eafb1e5581420ccf2224a | /tianchi/智能制造质量预测/Ridge_model.py | ca93a8c2908f01516b2156a760c521ec636a95a7 | [] | no_license | https://github.com/jianjunyue/python-learn-ml | 4191fc675d79830308fd06a62f16a23295a48d32 | 195df28b0b8b8b7dc78c57dd1a6a4505e48e499f | refs/heads/master | 2018-11-09T15:31:50.360084 | 2018-08-25T07:47:20 | 2018-08-25T07:47:20 | 102,184,768 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
#Commonly used sklearn methods for data analysis in Python
#http://blog.csdn.net/qq_16234613/article/details/76534673
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import Ridge
# train_df = pd.read_excel('/Users/jianjun.yue/PycharmGItHub/data/智能制造质量预测/训练.xlsx',header=0,encoding='utf-8')
# y_train=train_df["Y"]
# y_train.to_csv("/Users/jianjun.yue/PycharmGItHub/data/智能制造质量预测/训练_y_train_20171216.csv",index=False)
X_train=pd.read_csv('/Users/jianjun.yue/PycharmGItHub/data/智能制造质量预测/训练_quantity_2017121622.csv',header=0)
# X_train.shape
y_train=pd.read_csv('/Users/jianjun.yue/PycharmGItHub/data/智能制造质量预测/训练_y_train_20171216.csv',header=0)
# train_df=train_df.drop(["ID","Y"], axis=1)
# quantity = [attr for attr in train_df.columns if train_df.dtypes[attr] != 'object'] # set of numeric variables
quantity = [attr for attr in X_train.columns if X_train.dtypes[attr] == 'float64'] # set of numeric variables
print(len(quantity))
print(quantity)
# print(X_train[quantity].head())
# X_train=X_train.drop(["750X1452"], axis=1)
print(X_train.shape)
print(y_train.shape)
# # print(len(X_train.columns))
# X_train=train_df[quantity]
# X_train.to_csv("/Users/jianjun.yue/PycharmGItHub/data/智能制造质量预测/训练_quantity_20171216.csv",index=False)
# X_train = Imputer().fit_transform(X_train)
# data1=np.isnan(X_train).any()
# print( anynull(data1).head())
# check whether the data contains missing values
# print(type(np.isnan(X_train).any()))
# 2. drop the rows that contain missing values
# train.dropna(inplace=True)
num=0
# count=0
# for column in X_train.columns:
# try:
# count=count+1
# if X_train.dtypes[column] == 'float64':
# print(column + "::"+str(count)+"/"+str(num))
# X_train[column] = X_train[column].astype(float)
# print(X_train.dtypes[column])
# print(X_train[column][0])
# # X_train[column] = X_train[column].apply(lambda x:)
# # data['year'] = data.Date.apply(lambda x: x.split('-')[0])
# except Exception as err:
# print(column+":::"+err)
# # X_train[column] = X_train[column].fillna(0)
# # X_train = Imputer().fit_transform(train_df)
# for column in X_train.columns:
# try:
# count=count+1
# print(column + "::"+str(count)+"/"+str(num))
# X_train[column] = X_train[column].fillna(X_train[column].median())
# except Exception as err:
# print(column+":::"+err)
# X_train[column] = X_train[column].fillna(0)
# # X_train = Imputer().fit_transform(train_df)
# X_train.to_csv("/Users/jianjun.yue/PycharmGItHub/data/智能制造质量预测/训练_quantity_2017121622.csv",index=False)
# cross-validation to find the best parameter (GridSearch was ultimately used to pick the final value)
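# sweep 50 alpha values on a log scale between 1e-3 and 1e2; each candidate is
# scored by its mean 5-fold cross-validated RMSE (the square root of the
# negated "neg_mean_squared_error")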
alphas = np.logspace(-3, 2, 50)
test_scores = []
for alpha in alphas:
    clf = Ridge(alpha)
    test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=5, scoring="neg_mean_squared_error"))
    test_scores.append(np.mean(test_score))
print(test_scores)
plt.plot(alphas,test_scores)
plt.title("Ridge Model")
plt.show()
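# A possible follow-up, not part of the original script: pick the alpha with
# the lowest CV RMSE and refit on the full training set.
# best_alpha = alphas[int(np.argmin(test_scores))]
# final_model = Ridge(best_alpha).fit(X_train, y_train)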
| UTF-8 | Python | false | false | 3,324 | py | 228 | Ridge_model.py | 223 | 0.676299 | 0.646104 | 0 | 77 | 39 | 110 |
noironetworks/gbp-tempest-plugin | 5,085,241,283,657 | 1b657f0216a1d5d0de3451866aa4ddec1d32dc49 | c2c84be5ed5d326b24ab14c159edb7d1fee949b9 | /gbp_tempest_plugin/services/gbp/v2/json/nat_pool_client.py | a08282afedfb7bcc3c03be54f52fe9de07411ad3 | [] | no_license | https://github.com/noironetworks/gbp-tempest-plugin | d7e4e745d1ade2b2db34ccdc7ed7e129b57c5caf | 8b7640e82aa9b12ebee49177da0f7fe7ed2f699f | refs/heads/master | 2020-04-09T10:16:47.284119 | 2018-11-02T11:05:50 | 2018-11-02T11:05:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tempest.lib.common.utils import data_utils
from six.moves import http_client
from tempest.lib.common import rest_client
from oslo_serialization import jsonutils as json
from gbp_tempest_plugin.services.gbp.v2.json import base
class NATPoolClient(base.GbpClientV2Base):
"""API V2 Tempest REST client for GBP NAT Pool API"""
resource = "/grouppolicy/nat_pools"
def create_nat_pool(self, name, external_segment_id, ip_pool, **kwargs):
"""Create a NAT Pool"""
post_body = {'nat_pool': {'name': name, 'external_segment_id': external_segment_id, 'ip_pool': ip_pool}}
if kwargs.get('description'):
post_body['nat_pool']['description'] = kwargs.get('description')
post_body = json.dumps(post_body)
resp, body = self.post(self.get_uri(self.resource), post_body)
body = json.loads(body)
self.expected_success(http_client.CREATED, resp.status)
return rest_client.ResponseBody(resp, body)
def list_nat_pools(self):
"""List NAT Pools"""
resp, body = self.get(self.get_uri(self.resource))
body = json.loads(body)
self.expected_success(http_client.OK, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_nat_pool(self, id):
"""Delete a NAT Pool"""
resp, body = self.delete(self.get_uri(self.resource, id))
self.expected_success(http_client.NO_CONTENT, resp.status)
return rest_client.ResponseBody(resp, body)
def show_nat_pool(self, id):
"""Show a NAT Pool"""
resp, body = self.get(self.get_uri(self.resource, id))
body = json.loads(body)
self.expected_success(http_client.OK, resp.status)
return rest_client.ResponseBody(resp, body)
def update_nat_pool(self, id, **kwargs):
"""Update an existing External Policy"""
resp, body = self.put(self.get_uri(self.resource, id), json.dumps({'nat_pool':kwargs}))
body = json.loads(body)
self.expected_success(http_client.OK, resp.status)
return rest_client.ResponseBody(resp, body)
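# Example usage (hypothetical identifiers), assuming `client` is an
# authenticated NATPoolClient instance:
#   resp = client.create_nat_pool('pool-1', external_segment_id, '192.168.0.0/24')
#   client.show_nat_pool(resp['nat_pool']['id'])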
| UTF-8 | Python | false | false | 2,093 | py | 34 | nat_pool_client.py | 32 | 0.655041 | 0.653607 | 0 | 49 | 41.714286 | 112 |
tjudoubi/tbcnn_vector_js | 8,074,538,538,326 | 761d4e11271f95600074188bad1b144c73c6540b | be42e602d56238f8d316649fd711a1430577a5ef | /datasetforTBCCD-master/sampleJava.py | 4e5ae1355c64222c7655b0d8a03f02131ed23faf | [] | no_license | https://github.com/tjudoubi/tbcnn_vector_js | 0749513c76ef4fd4dac9a7be1b8a2aa5571f7de3 | 638c4a46d7d9c83637edc5b7ff1b0cc54ab4e2c1 | refs/heads/master | 2022-12-18T11:39:51.282433 | 2020-10-05T15:56:25 | 2020-10-05T15:56:25 | 301,389,163 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import javalang
from javalang.ast import Node
def _name(node):
return type(node).__name__
def dfsSearch_withid(children):
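    # Flatten one level of `children` into the global list `listt1`: keep
    # string tokens and AST nodes, but skip empty strings, quoted literals,
    # and block comments; `num_nodes` is incremented for every kept child.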
if not isinstance(children, (str, Node, list, tuple)):
return
if isinstance(children, (str, Node)):
if str(children) == '':
return
if str(children).startswith('"'):
return
if str(children).startswith("'"):
return
if str(children).startswith("/*"):
return
# ss = str(children)
global num_nodes
num_nodes += 1
listt1.append(children)
return
for child in children:
if isinstance(child, (str, Node, list, tuple)):
dfsSearch_withid(child)
def _traverse_treewithid(root):
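    # Breadth-first conversion of a javalang AST into a nested dict of the
    # form {"node": <name or token>, "children": [...]}; returns the dict
    # together with the total node count.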
global num_nodes
num_nodes = 1
queue = [root]
root_json = {
"node": _name(root),
"children": []
}
queue_json = [root_json]
while queue:
current_node = queue.pop(0)
current_node_json = queue_json.pop(0)
global listt1
listt1 = []
dfsSearch_withid(current_node.children)
children = listt1
for child in children:
child_json = {
"node": str(child),
"children": []
}
current_node_json['children'].append(child_json)
            if isinstance(child, Node):
queue_json.append(child_json)
queue.append(child)
return root_json, num_nodes
def _pad_nobatch(children):
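    # Pad every child-index list so that all lists in the batch share the
    # length of the longest one (padding value 0).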
child_len = max([len(c) for n in children for c in n])
children = [[c + [0] * (child_len - len(c)) for c in sample] for sample in children]
return children
def dfsSearch_noid(children):
if not isinstance(children, (Node, list, tuple)):
return
if isinstance(children, Node):
global num_nodes
        num_nodes += 1
listt1.append(children)
return
for child in children:
if isinstance(child, (Node, list, tuple)):
dfsSearch_noid(child)
def _traverse_tree_noid(root):
global num_nodes
num_nodes = 1
queue = [root]
root_json = {
"node": _name(root),
"children": []
}
queue_json = [root_json]
while queue:
current_node = queue.pop(0)
current_node_json = queue_json.pop(0)
global listt1
        listt1 = []
dfsSearch_noid(current_node.children)
children = listt1
for child in children:
child_json = {
"node": str(child),
"children": []
}
current_node_json['children'].append(child_json)
queue_json.append(child_json)
queue.append(child)
return root_json, num_nodes
def _traverse_tree_noast(root):
global num_nodes
num_nodes = 1
queue = [root]
root_json = {
"node": _name(root),
"children": []
}
queue_json = [root_json]
while queue:
current_node = queue.pop(0)
current_node_json = queue_json.pop(0)
global listt1
listt1 = []
dfsSearch_withid(current_node.children)
children = listt1
for child in children:
            if isinstance(child, Node):
child_json = {
"node": "AstNode",
"children": []
}
current_node_json['children'].append(child_json)
queue_json.append(child_json)
queue.append(child)
else:
child_json = {
"node": str(child),
"children": []
}
current_node_json['children'].append(child_json)
return root_json, num_nodes
def getData_nofinetune(l,dictt,embeddings):
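    # Build the model inputs for one labelled tree pair l = (id1, id2, label):
    # for each tree, a breadth-first list of node embedding vectors plus a
    # parallel children-index structure. `dictt` maps an id to its JSON-style
    # tree and `embeddings` maps a node token to its vector.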
nodes11 = []
children11 = []
nodes22 = []
children22 = []
label = l[2]
queue1 = [(dictt[l[0]], -1)]
while queue1:
node1, parent_ind1 = queue1.pop(0)
node_ind1 = len(nodes11)
queue1.extend([(child, node_ind1) for child in node1['children']])
children11.append([])
if parent_ind1 > -1:
children11[parent_ind1].append(node_ind1)
nodes11.append(embeddings[node1['node']])
queue2 = [(dictt[l[1]], -1)]
while queue2:
node2, parent_ind2 = queue2.pop(0)
node_ind2 = len(nodes22)
queue2.extend([(child, node_ind2) for child in node2['children']])
children22.append([])
if parent_ind2 > -1:
children22[parent_ind2].append(node_ind2)
nodes22.append(embeddings[node2['node']])
children111 = []
children222 = []
children111.append(children11)
children222.append(children22)
children1 = _pad_nobatch(children111)
children2 = _pad_nobatch(children222)
return [nodes11],children1,[nodes22],children2,label
def getData_finetune(l,dictt,embeddings):
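    # Same construction as getData_nofinetune, but the node lists are returned
    # unwrapped and the label is wrapped in `batch_labels`; presumably the
    # shape expected by the fine-tuning code path.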
nodes11 = []
children11 = []
nodes22 = []
children22 = []
label = l[2]
queue1 = [(dictt[l[0]], -1)]
while queue1:
node1, parent_ind1 = queue1.pop(0)
node_ind1 = len(nodes11)
queue1.extend([(child, node_ind1) for child in node1['children']])
children11.append([])
if parent_ind1 > -1:
children11[parent_ind1].append(node_ind1)
nodes11.append(embeddings[node1['node']])
queue2 = [(dictt[l[1]], -1)]
while queue2:
node2, parent_ind2 = queue2.pop(0)
node_ind2 = len(nodes22)
queue2.extend([(child, node_ind2) for child in node2['children']])
children22.append([])
if parent_ind2 > -1:
children22[parent_ind2].append(node_ind2)
nodes22.append(embeddings[node2['node']])
children111 = []
children222 = []
batch_labels = []
children111.append(children11)
children222.append(children22)
children1 = _pad_nobatch(children111)
children2 = _pad_nobatch(children222)
batch_labels.append(label)
return nodes11,children1,nodes22,children2,batch_labels | UTF-8 | Python | false | false | 6,033 | py | 2,659 | sampleJava.py | 136 | 0.555777 | 0.52246 | 0 | 206 | 28.291262 | 88 |