Dataset schema (column: type and value range; ⌀ marks nullable columns):
repo_name: stringlengths 7-111
__id__: int64 16.6k-19,705B
blob_id: stringlengths 40-40
directory_id: stringlengths 40-40
path: stringlengths 5-151
content_id: stringlengths 40-40
detected_licenses: list
license_type: stringclasses 2 values
repo_url: stringlengths 26-130
snapshot_id: stringlengths 40-40
revision_id: stringlengths 40-40
branch_name: stringlengths 4-42
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64 14.6k-687M ⌀
star_events_count: int64 0-209k
fork_events_count: int64 0-110k
gha_license_id: stringclasses 12 values
gha_fork: bool 2 classes
gha_event_created_at: timestamp[ns]
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_size: int64 0-10.2M ⌀
gha_stargazers_count: int32 0-178k ⌀
gha_forks_count: int32 0-88.9k ⌀
gha_open_issues_count: int32 0-2.72k ⌀
gha_language: stringlengths 1-16 ⌀
gha_archived: bool 1 class
gha_disabled: bool 1 class
content: stringlengths 10-2.95M
src_encoding: stringclasses 5 values
language: stringclasses 1 value
is_vendor: bool 2 classes
is_generated: bool 2 classes
length_bytes: int64 10-2.95M
extension: stringclasses 19 values
num_repo_files: int64 1-202k
filename: stringlengths 4-112
num_lang_files: int64 1-202k
alphanum_fraction: float64 0.26-0.89
alpha_fraction: float64 0.2-0.89
hex_fraction: float64 0-0.09
num_lines: int32 1-93.6k
avg_line_length: float64 4.57-103
max_line_length: int64 7-931
awelzel/influxpy | 4,818,953,337,795 | b043587373df06bf2ebd5c583f30ff2f1e8787c3 | 55a4aab4f21713ee1f43005a053c2b702fbf2661 | /tests/test_handler.py | 27bbea6eafede7e007c6b2afa907f25b1302c206 | ["BSD-3-Clause"] | permissive | https://github.com/awelzel/influxpy | 8805cca2ea89d7c6e2655dfdd1dfeb3c53d7e410 | 773179ff56a1128b246a63736483b9e3bc76f083 | refs/heads/master | 2020-03-25T07:04:37.177208 | 2018-08-05T21:48:04 | 2018-08-05T22:03:54 | 143,539,960 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import logging
import socket
import unittest
from unittest import mock
from influxpy import UDPHandler
class TestHandler(unittest.TestCase):
"""
    We mock the UDP socket of the DatagramHandler and look at the sendto() call.
"""
def setUp(self):
self.logger = logging.getLogger("test_logger")
for h in self.logger.handlers[:]:
self.logger.removeHandler(h)
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
self.sock_mock = mock.Mock(spec=socket.socket)
self.handler = None
def tearDown(self):
if self.handler:
self.handler.close()
def test__log_defaults(self):
self.handler = UDPHandler("127 0 0 1", -1, "test", sock=self.sock_mock)
self.logger.addHandler(self.handler)
self.logger.info("Hello")
self.sock_mock.sendto.assert_called_once()
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertRegex(data, "^test,")
self.assertRegex(data, " [0-9]{15,}$")
self.assertIn("level_name=INFO", data)
self.assertIn('message="Hello"', data)
self.assertNotIn('function=', data)
def test__log_debugging_fields_true(self):
self.handler = UDPHandler("127 0 0 1", -1, "xyz",
debugging_fields=True,
sock=self.sock_mock)
self.logger.addHandler(self.handler)
self.logger.info("A very long message.")
self.sock_mock.sendto.assert_called_once()
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertIn("pid=", data)
self.assertIn('function="test_', data)
def test__log_utf8_message(self):
self.handler = UDPHandler("127 0 0 1", -1, "xyz",
sock=self.sock_mock)
self.logger.addHandler(self.handler)
self.logger.info("Yes: " + b"\xE2\x9C\x8C".decode("utf-8"))
self.sock_mock.sendto.assert_called_once()
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertIn('message="Yes: \u270C"', data)
def test__localname(self):
self.handler = UDPHandler("127 0 0 1", -1, "A",
localname="test-host",
sock=self.sock_mock)
self.logger.addHandler(self.handler)
self.logger.info("Aloha")
self.sock_mock.sendto.assert_called_once()
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertIn("host=test-host", data)
def test__mocked_gethostbyname(self):
self.handler = UDPHandler("127 0 0 1", -1, "A",
sock=self.sock_mock)
self.logger.addHandler(self.handler)
with mock.patch("socket.gethostname") as m:
m.return_value = "mock-host"
self.logger.info("Aloha")
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertIn("host=mock-host", data)
def test__mocked_getfqdn(self):
self.handler = UDPHandler("127 0 0 1", -1, "A",
fqdn=True,
sock=self.sock_mock)
self.logger.addHandler(self.handler)
with mock.patch("socket.getfqdn") as m:
m.return_value = "fqdn-host"
self.logger.info("Aloha")
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertIn("host=fqdn-host", data)
def test__extra_fields(self):
self.handler = UDPHandler("127 0 0 1", -1, "xyz",
sock=self.sock_mock)
self.logger.addHandler(self.handler)
extra = {
"emails_processed": 10,
"disk_utilization": 73.1,
}
self.logger.info("Processed 10 mails", extra=extra)
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertIn('emails_processed=10i', data)
self.assertIn('disk_utilization=73.1', data)
def test__extra_fields_disabled(self):
self.handler = UDPHandler("127 0 0 1", -1, "X",
extra_fields=False,
sock=self.sock_mock)
self.logger.addHandler(self.handler)
extra = {
"emails_processed": 10,
"disk_utilization": 73.1,
}
self.logger.info("Processed 10 mails", extra=extra)
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertNotIn('emails_processed', data)
self.assertNotIn('disk_utilization', data)
def test__global_tags(self):
self.handler = UDPHandler("127 0 0 1", -1, "X",
global_tags={
"datacenter": "us-west",
},
sock=self.sock_mock)
self.logger.addHandler(self.handler)
self.logger.info("I'm not the only one here!")
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertRegex(data, r'^[^ ]+datacenter=us-west.* .*[0-9]+$')
def test__log_exception(self):
self.handler = UDPHandler("127 0 0 1", -1, "X",
sock=self.sock_mock)
self.logger.addHandler(self.handler)
try:
puff_the_magic_dragon()
except NameError:
self.logger.exception("No dragons here.")
data, to = self.sock_mock.sendto.call_args[0]
data = data.decode("utf-8")
self.assertIn('message="No dragons here."', data)
full_msg = (
'full_message="No dragons here.\n'
'Traceback (most recent call last):\n'
)
self.assertIn(full_msg, data)
def test__using_reserved_global_tag_fails(self):
with self.assertRaisesRegex(ValueError, "host.*in global_tags impossible"):
UDPHandler("127 0 0 1", -1, "X", global_tags={"host": "reserved"})
| UTF-8 | Python | false | false | 6,198 | py | 8 | test_handler.py | 6 | 0.549209 | 0.528235 | 0.000484 | 156 | 38.730769 | 83 |
kmcheung12/advent-of-code-2019 | 13,804,024,931,286 | 557ddc47b23d5c89a9526ee597cb61dd9465a228 | a3d37c6ca273bf826a36565122d896f4cb760f5d | /day1.py | 738a1d27d5b6ee68496ee7f164e6097730c45e14 | [] | no_license | https://github.com/kmcheung12/advent-of-code-2019 | d99332e0f9f5505b9a1846a72693af0cf0cee635 | 3daafb2f7fc9e3b81eaac5e3a6ece9288bd87787 | refs/heads/master | 2020-09-26T04:37:19.047272 | 2019-12-08T17:01:01 | 2019-12-08T17:01:01 | 226,166,853 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
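# Advent of Code 2019, day 1 (part 2): fuel for a mass is int(mass/3) - 2,
# applied recursively because the added fuel itself has mass.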
def fuel(mass, acc=0):
s = int(mass/3) - 2
if s < 9:
return s + acc
else:
return fuel(s, s + acc)
def compute(params):
return sum(fuel(p) for p in params)
def preprocess(inputs):
return map(int, inputs)
if __name__ == '__main__':
raw = sys.argv[1:]
params = preprocess(raw)
output = compute(params)
print(output)
| UTF-8 | Python | false | false | 381 | py | 8 | day1.py | 8 | 0.566929 | 0.553806 | 0 | 19 | 19.052632 | 39 |
simonsben/intent_detection | 12,025,908,450,286 | c4e6626a2a53fb59ee84b4bef59a984246876fc0 | 66e7fdfb8498c7573719dda5e3f7744a544bec80 | /model/layers/realtime_embedding.py | 8a502a2d9e94407bb5e66f0b2d57aad94e5c15b9 | [] | no_license | https://github.com/simonsben/intent_detection | d172976ead7d9d7d33f2ad0803c92cdc1afe28e3 | 7ca7e4e1f0e7ec92c034f459a8a8b199d37dfb17 | refs/heads/master | 2023-07-14T18:47:48.077452 | 2020-09-18T16:21:58 | 2020-09-18T16:21:58 | 257,970,547 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from fasttext.FastText import _FastText
from tensorflow.keras.utils import Sequence
from numpy import zeros, ones, ndarray, abs
from config import batch_size, max_tokens
from math import ceil
class RealtimeEmbedding(Sequence):
""" Extends TensorFlow Sequence to provide on-the-fly fastText token embedding """
def __init__(self, embedding_model, data_source, labels=None, uniform_weights=False):
"""
Implements Keras data sequence for on-the-fly embedding generation
:param _FastText embedding_model: FastText embedding model
:param ndarray data_source: List of documents to embed on the fly
:param ndarray labels: Array of data labels
:param bool uniform_weights: Whether weights should be uniform (i.e. 1)
"""
self.embedding_model = embedding_model
self.embedding_dimension = embedding_model.get_dimension()
self.embedding_cache = {}
self.data_source = data_source
self.working_data_source = self.data_source
self.labels = labels
self.working_labels = self.labels
self.working_mask = None
self.is_training = False
self.concrete_weight = 1
self.midpoint = 0.5
self.uniform_weights = uniform_weights
self.data_length = ceil(len(self.working_data_source) / batch_size)
def update_labels(self, new_labels):
""" Updates the labels being fed """
self.labels = new_labels.copy()
self.set_mask(self.working_mask)
# TODO add line to automatically mask uncertain values when training
def set_usage_mode(self, is_training):
""" Changes usage mode """
if is_training is True and self.labels is None:
raise AttributeError('Cannot use in training mode if there are no labels.')
self.is_training = is_training
def set_mask(self, definite_mask):
""" Updates the current mask being applied to the data """
self.working_mask = definite_mask
if self.working_mask is not None: # If not None, apply mask to data
self.working_data_source = self.data_source[self.working_mask]
self.working_labels = self.labels[self.working_mask]
# If updated mask is None, make working set entire set
else:
self.working_data_source = self.data_source
self.working_labels = self.labels
# Recompute data length
self.data_length = ceil(len(self.working_data_source) / batch_size)
def get_sample_weights(self, batch_start, batch_end):
"""
Returns sample weights for data samples.
Weights are computed using the function w = 2(x - .5) when x = (.5, 1], and the negation when x = [0, .5)
"""
if self.uniform_weights:
return ones(batch_end - batch_start)
labels = self.working_labels[batch_start:batch_end]
weights = compute_sample_weights(labels, self.midpoint)
return weights
def embed_data(self, data_subset):
""" Computes word embeddings for provided data subset """
# Initialize embedding of data
embedded_data = zeros((data_subset.shape[0], max_tokens, self.embedding_dimension), float)
# Embed all documents
for doc_index, document in enumerate(data_subset):
document_tokens = document.split(' ')[:max_tokens] # Split document into tokens and limit
# For each token in document
for token_index, token in enumerate(document_tokens):
# If token embedding is not already cached, compute it and store
if token not in self.embedding_cache:
self.embedding_cache[token] = self.embedding_model.get_word_vector(token)
# Add embedding to array
embedded_data[doc_index, token_index] = self.embedding_cache[token]
return embedded_data
def __len__(self):
""" Overrides length method to compute the length in batches """
if self.is_training:
return self.data_length
return ceil(len(self.data_source) / batch_size)
def __getitem__(self, index):
""" Provides the batch of data at a given index """
batch_start = int(index * batch_size)
batch_end = batch_start + batch_size
# Get batch of data
source = self.working_data_source if self.is_training else self.data_source
working_data = source[batch_start:batch_end]
        # Clamp the batch end to the length of the data source
if batch_end > len(source):
batch_end = len(source)
embedded_data = self.embed_data(working_data)
# If training also return labels
if self.is_training:
# Get batch labels and convert to boolean
label_subset = self.working_labels[batch_start:batch_end]
label_subset = label_subset > self.midpoint
loss_weights = self.get_sample_weights(batch_start, batch_end)
return embedded_data, label_subset, loss_weights
return embedded_data
def compute_sample_weights(labels, midpoint=0.5):
"""
Computes sample weights for training
:param ndarray labels: Array of current labels
:param float midpoint: Midpoint for computing the loss weight around
"""
return 2 * abs(labels - midpoint)
| UTF-8 | Python | false | false | 5,610 | py | 165 | realtime_embedding.py | 153 | 0.627451 | 0.624955 | 0 | 143 | 37.230769 | 113 |
glujan/lapka | 7,404,523,653,016 | 5b25bb2e7d6282c3b3bb8e3676047bc9a9100043 | 642a6d97425dfc04ebd67de99b2eb8b4b96d1c48 | /lapka/fetch.py | 5ad86a2825cf143aa27e2d9b86c46387ab097df1 | ["MIT"] | permissive | https://github.com/glujan/lapka | 29cc286a993cea5dc2c5d0834627190568dcb0a3 | 6ae8202e8c54210becb306cb87d9a3de2b9c3626 | refs/heads/master | 2020-05-28T09:49:11.547668 | 2018-06-22T07:41:23 | 2018-06-22T07:42:44 | 82,586,331 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""Fetch and parse data from shelters' websites to a common format."""
import asyncio
from functools import partial
from urllib.parse import urljoin, urlparse
import aiohttp
from lxml import etree
class Shelter:
"""Base class for extracting data about from shelters websites."""
animal_url = ""
""""XPath expression to find URLs to animals profiles"""
next_url = ""
"""XPath expression to find next website in pagination"""
start_url = ""
"""URL to the beginning of animals list"""
def __init__(self, session=None):
"""Initialize a new SchroniskoWroclawPl instance."""
self.session = session
url = urlparse(self.start_url)
self._full_url = partial(urljoin, f"{url.scheme}://{url.netloc}/")
"""Transform partial URL to a full one. Use with instance of this class."""
async def parse(self, session=None):
"""Crawl shelter's website and return data in an unified format."""
if session:
self.session = session
async def task(url):
async with self.session.get(url) as resp:
content = await resp.text()
data = self._parse(content)
data['url'] = url
return data
coros = [task(a_url) async for a_url in self._animals_urls()]
return await asyncio.gather(*coros)
async def _animals_urls(self):
url = self.start_url
while url:
async with self.session.get(url) as resp:
content = await resp.read()
doc = etree.HTML(content)
for a_url in doc.xpath(self.animal_url):
yield self._full_url(a_url)
try:
new_url = self._full_url(doc.xpath(self.next_url)[0])
url = new_url if new_url != url else None
except IndexError:
url = None
def _parse(self, content: str) -> dict:
"""Extract data from animal's page."""
raise NotImplementedError
class SchroniskoWroclawPl(Shelter):
"""Extract data about animals from schroniskowroclaw.pl website."""
animal_url = "//div[@class='filter-item']//h5/a/@href"
next_url = "//ul[@class='page-numbers']//a[@class='next page-numbers']/@href"
start_url = "http://schroniskowroclaw.pl/zwierzeta-do-adopcji/"
def _parse(self, content):
html = etree.HTML(content)
if html is None:
return {}
try:
doc = html.xpath("//div[@class='project']")[0]
name = doc.xpath("//div[@class='project-details']//h1/text()")[0].split(' ')[0]
a_id, since, *other = doc.xpath("//*[@class='project-info']//span/text()")
category = other[0] if other else None # FIXME Normalize categories
photos = map(self._full_url, doc.xpath("//div[@class='project-slider']//img/@src"))
data = {
'name': name.strip().title(),
'id': a_id,
'since': since,
'category': category,
'photos': list(photos),
'description': doc.xpath("//div[@class='project-details']/p/text()"),
}
except (IndexError, etree.XMLSyntaxError):
# TODO Add logging that couldn't parse website
data = {}
return data
class NaPaluchuWawPl(Shelter):
"""Extract data about animals from schroniskowroclaw.pl website."""
animal_url = "//div[@id='ani_search_result_cont']//a[@class='animals_btn_list_more']/@href"
next_url = "//div[@class='pagination']/a[@class='next']/@href"
start_url = "http://www.napaluchu.waw.pl/czekam_na_ciebie/wszystkie_zwierzeta_do_adopcji"
def _parse(self, content):
html = etree.HTML(content)
if html is None:
return {}
try:
doc = html.xpath("//div[@class='ani_one_container']")[0]
name = doc.xpath("//h5/text()")[0]
category, *_, since, a_id = doc.xpath("//div[@class='info']//span[not(@class)]/text()")
photos = map(self._full_url, doc.xpath("//div[@id='main_image_cont']//a/@href"))
raw_desc = doc.xpath("//div[@class='description']//text()")
desc = filter(bool, map(lambda s: s.strip(), raw_desc))
data = {
'name': name.strip().title(),
'id': a_id.split(': ')[-1],
'since': since.strip().split(': ')[-1],
'category': category.strip().split(': ')[-1], # TODO Normalize
'photos': list(photos),
'description': list(desc),
}
except (IndexError, etree.XMLSyntaxError):
# TODO Add logging that couldn't parse website
data = {}
return data
if __name__ == '__main__':
async def _main():
import pickle
from itertools import chain
from lapka import models
async with aiohttp.ClientSession() as session:
tasks = (sh.parse(session) for sh in (SchroniskoWroclawPl(), NaPaluchuWawPl()))
fetched = await asyncio.gather(*tasks)
animals = [models.AnimalDummy(**data) for data in chain(*fetched)]
print('Gathered', len(animals), 'animals')
with open(models._pickle_path, 'wb') as fp:
pickle.dump(animals, fp)
loop = asyncio.get_event_loop()
loop.run_until_complete(_main())
loop.close()
| UTF-8 | Python | false | false | 5,417 | py | 30 | fetch.py | 15 | 0.560273 | 0.557873 | 0 | 156 | 33.724359 | 99 |
alsandiego/UCI_Bootcamp_Project2 | 1,288,490,210,644 | 5755a137f4f40ecdc3cbb8e75c3ea9a776a50c60 | 9c576e3cd37d29579b4053d9de876232f5e18224 | /app.py | 56ae7403a8ad4476d69766fcea7f834f2428eaf8 | [] | no_license | https://github.com/alsandiego/UCI_Bootcamp_Project2 | cacea93d0ff01f59b7dbb45a23e2f70c5d54f9a9 | 04c619116cd870d5d605d3558610dd36ab8d3da9 | refs/heads/master | 2020-11-26T15:08:46.377880 | 2020-01-14T03:46:58 | 2020-01-14T03:46:58 | 229,116,719 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from flask import Flask, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
#################################################
# Database Setup
#################################################
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db/oc_salary_db.sqlite"
db = SQLAlchemy(app)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(db.engine, reflect=True)
# Save references to each table
oc_salary_db = Base.classes.oc_salary
@app.route("/")
def index():
"""Return the homepage."""
return render_template("index.html")
@app.route("/page2")
def index2():
"""Return the homepage."""
return render_template("page2.html")
@app.route("/year")
def year():
"""Return a list of years."""
# Use Pandas to perform the sql query
stmt = db.session.query(oc_salary_db).statement
df = pd.read_sql_query(stmt, db.session.bind, coerce_float=False)
# Return a list of the column names (sample names)
return jsonify(list(df.columns)[3:])
# shows the department salary total for selected year
@app.route("/year/<year>")
def year_dept_total(year):
"""Return department total for selected year"""
# Use Pandas to perform the sql query
stmt = db.session.query(oc_salary_db).statement
df = pd.read_sql_query(stmt, db.session.bind)
# sample_data = df.loc[(df[year] > 0), ["department", year]]
groupby_sum = df.groupby(["department"])[year].sum().reset_index()
# Sort by sample
groupby_sum.sort_values(by="department", ascending=True, inplace=True)
# Format the data to send as json
data = {
"department": groupby_sum.department.tolist(),
"total_salary": groupby_sum[year].values.tolist(),
}
return jsonify(data)
# shows salary and position for selected year and department
@app.route("/<yr>/<dept>")
def year_dept(yr, dept):
"""Return department salary for year selected."""
# Use Pandas to perform the sql query
stmt = db.session.query(oc_salary_db).statement
df = pd.read_sql_query(stmt, db.session.bind)
# print(dept.upper())
dept_total = df.loc[(df[yr] >0) & (df["department"] == dept.upper()), [dept, "position", yr]]
# Format the data to send as json
data = {
"salary": dept_total[yr].values.tolist(),
"position": dept_total.position.tolist()
}
return jsonify(data)
@app.route("/department")
def department():
"""Return a list of departments."""
# Use Pandas to perform the sql query
stmt = db.session.query(oc_salary_db).statement
df = pd.read_sql_query("select department from oc_salary group by department", db.session.bind, coerce_float=False)
# Return a list of the column names (sample names)
return jsonify(list(df["department"].values))
@app.route("/department/<year>")
def dept(year):
"""Return department, position."""
stmt = db.session.query(oc_salary_db).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Filter the data based on the sample number and
# only keep rows with values above 1
# sample_data = df.loc[(df[year] >0) & (df["department"]== "ACCOUNTING"), ["department", "position", year]]
sample_data = df.loc[(df[year] >0), ["position", year]]
# Sort by sample
sample_data.sort_values(by="position", ascending=True, inplace=True)
# Format the data to send as json
data = {
"position": sample_data.position.tolist(),
"salary": sample_data[year].values.tolist()
}
return jsonify(data)
@app.route("/position/<year>")
def position(year):
"""Return department, position."""
stmt = db.session.query(oc_salary_db).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Filter the data based on the sample number and
# only keep rows with values above 1
# sample_data = df.loc[(df[year] >0) & (df["department"]== "ACCOUNTING"), ["department", "position", year]]
sample_data = df.loc[(df[year] >0), ["department", "position", year]]
# Sort by sample
sample_data.sort_values(by="department", ascending=True, inplace=True)
# Format the data to send as json
data = {
"department": sample_data.department.values.tolist(),
"salary": sample_data[year].values.tolist(),
"position": sample_data.position.tolist(),
}
return jsonify(data)
# shows salary for selected department and position
@app.route("/bubble/<department>/<position>")
def bubblechart(department, position):
"""Return all salary for dept and position selected."""
# Use Pandas to perform the sql query
stmt = db.session.query(oc_salary_db).statement
df = pd.read_sql_query(stmt, db.session.bind)
# print(dept.upper())
dept_total = df.loc[(df["department"] == department.upper()) & (df["position"] == position.upper()), [department, position, "2014","2015","2016","2017","2018"]]
# Format the data to send as json
data = {
"2014": dept_total["2014"].tolist(),
"2015": dept_total["2015"].tolist(),
"2016": dept_total["2016"].tolist(),
"2017": dept_total["2017"].tolist(),
"2018": dept_total["2018"].tolist()
}
return jsonify(data)
@app.route("/bubble2/<department>")
def bubble2(department):
"""Return department, position."""
stmt = db.session.query(oc_salary_db).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Filter the data based on the sample number and
# only keep rows with values above 1
# sample_data = df.loc[(df[year] >0) & (df["department"]== "ACCOUNTING"), ["department", "position", year]]
sample_data = df.loc[(df["department"] == department.upper()), ["position", "2014","2015","2016","2017","2018"]]
# Sort by sample
sample_data.sort_values(by="position", ascending=True, inplace=True)
# Format the data to send as json
data = {
"position": sample_data.position.tolist(),
"2014": sample_data["2014"].tolist(),
"2015": sample_data["2015"].tolist(),
"2016": sample_data["2016"].tolist(),
"2017": sample_data["2017"].tolist(),
"2018": sample_data["2018"].tolist()
}
return jsonify(data)
if __name__ == "__main__":
    app.run(debug=True)
| UTF-8 | Python | false | false | 6,471 | py | 5 | app.py | 3 | 0.639005 | 0.617988 | 0 | 196 | 32.020408 | 164
juanjavierlimachi/mipagina | 19,121,194,415,997 | 6e64bf6289b3f30d3dba977832f5c73f63dc9ae9 | 7ac31c08e3727297d04eb8dbdc1e7d48b0bd2fed | /mipagina/apps/proveedor/models.py | a1ad2ad0757bb19f93f0a55349cb39b6cf47a351 | [] | no_license | https://github.com/juanjavierlimachi/mipagina | 06cc8b7748b474f5606fa12ef5874583531c3a80 | 5e9c0ab89bac40edff43b86675a72860442c79f4 | refs/heads/master | 2021-01-20T05:29:33.600400 | 2017-05-27T15:58:49 | 2017-05-27T15:58:49 | 89,789,828 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
# Create your models here.
class Proveedor(models.Model):
Nombre_Razon_Social=models.CharField(max_length=200, unique=True)
Nit=models.PositiveIntegerField(max_length=15, unique=True)
Telefono=models.PositiveIntegerField(max_length=8, unique=True)
Direccion=models.CharField(max_length=150)
Email=models.EmailField(unique=True)
fecha_registro = models.DateTimeField(auto_now=True)
estado=models.IntegerField(default=0)
def __unicode__(self):
return "%s Nit:%s Telf:%s"%(self.Nombre_Razon_Social,self.Nit,self.Telefono)
| UTF-8 | Python | false | false | 558 | py | 64 | models.py | 16 | 0.786738 | 0.768817 | 0 | 12 | 45.5 | 78 |
yeyeto2788/GetTheUmbrella | 498,216,209,102 | a19a517375da3d6e923533271309c6347 | 4a58a20a8bf66832d62cef42e61fb5504808656d | /GetTheUmbrella.py | 309f9e899a4f9fde28041f6f79e5cbf01335454b | [] | no_license | https://github.com/yeyeto2788/GetTheUmbrella | 2ad6146ff1f3655455b4f2a9b9c9c7c7c7c7c7c7 | 68f30d47a37b30d8af5234c6d73c976ffaa54998 | refs/heads/master | 2020-02-21T16:42:13.897157 | 2018-10-08T12:41:22 | 2018-10-08T12:41:22 | 99,704,472 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import time
import json
import inspect
import logging
import smtplib
import requests
import datetime
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
class GetUmbrella(object):
def __init__(self):
filename = inspect.getframeinfo(inspect.currentframe()).filename
script_path = os.path.dirname(os.path.abspath(filename))
self.script_path = script_path
self.config = self.load_config()
self.blnSend = 0
self.send_mail(self.parse_conditions(self.get_cities_forecast()))
def load_config(self):
"""
Load configuration from the config.json file.
Returns:
json object with configuration.
"""
config_file = "config.json"
script_root = self.script_path
with open(os.path.join(script_root, config_file), "r") as ConfigFile:
return json.load(ConfigFile)
def get_cities_forecast(self):
"""
Retrieve the forecast for several cities
Returns:
cities_conditions: List with the city list conditions
"""
cities_conditions = []
for city in self.config["openweathermap"]["cities"]:
cities_conditions.append([city, self.get_forecast(city)])
return cities_conditions
def parse_conditions(self, lst_conditions):
"""
This will parse the weather conditions and convert it into html to me sent.
Args:
lst_conditions: List with weather conditions
Returns:
msg: String with formatted HTML text.
"""
msg = "Weather conditions for tomorrow.<br>"
for iteraction, cities in enumerate(lst_conditions):
city = lst_conditions[iteraction][0]
city_conditions = lst_conditions[iteraction][1]
if len(city_conditions) > 1:
msg += "<br>In <b>{}</b>:<br>".format(city)
for condition in city_conditions:
condition_date = condition[0]
min_tem = condition[1]
max_temp = condition[2]
strcondition = condition[3]
icon = condition[4]
msg += """<br>At <b>{}</b> there will be <b>{}</b>
<br>with the following temperatures:<br>
<b>Minimum temperature:</b>{}<br>
<b>Maximum temperature:</b> {}<br>
<img src="cid:{}"><br>""".format(condition_date, strcondition, min_tem, max_temp, icon)
return msg
def get_forecast(self, strcity):
"""
This function will get 5 days forecast weather in Barcelona
to sent through the Bot
Args:
strcity: String with the city name you want to retrieve the forecast from.
Returns:
conditions: list with the conditions retrieved
"""
conditions = []
r = requests.get("http://api.openweathermap.org/data/2.5/forecast?q={}&appid={}".format(strcity, self.config["openweathermap"]["API_KEY"])).json()
umbrella_conditions = ("heavy rain", "light rain", "rain", "moderate rain", "shower rain", "thunderstorm")
for i in range(len(r["list"])):
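            # OpenWeatherMap returns temperatures in Kelvin; subtract 273.15 to get Celsius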
min_temp = int(r["list"][i]["main"]["temp_min"] - 273.15)
max_temp = int(r["list"][i]["main"]["temp_max"] - 273.15)
Date = str(r["list"][i]["dt_txt"])
condition = str(r["list"][i]["weather"][0]["description"])
icon = str(r["list"][i]["weather"][0]["icon"])
tomorrow = str(datetime.date.today() + datetime.timedelta(days=1))
if Date.split(" ")[0] == tomorrow:
if condition in umbrella_conditions:
self.blnSend = 1
conditions.append([Date, min_temp, max_temp, condition, icon])
return conditions
def log(self, strlog):
"""
This function will log every command and msg received by the bot or any
other info the user wants to log on the file.
Args:
strlog: String with the message or data to want to write on the file.
Returns:
Nothing.
"""
if self.config['log']:
FORMAT = '%(levelname)s,%(name)s,%(message)s'
logging.basicConfig(format=FORMAT, filename=os.path.join(self.script_path, 'LogFile.log'), level=logging.INFO)
time_now = time.strftime("%Y-%m-%d,%H:%M:%S", time.gmtime())
logging.info(time_now + "," + strlog)
def send_mail(self, strmsg):
"""
Send the mail warning about weather condition
Args:
strmsg: String with the message to be sent.
Returns:
Nothing.
"""
if self.blnSend:
# Email of sender data.
from_email = self.config["emails"]["from"]
email_passwd = self.config["emails"]["password"]
to_emails = self.config["emails"]["to"]
for to_email in to_emails:
# Compose the email to be sent
message = MIMEMultipart()
message['Subject'] = 'Rain Alert!!!'
message['From'] = from_email
message['To'] = to_email
# Read image to sent
for icon in self.config['icons']:
imageToMail = os.path.normpath(os.path.join(self.script_path, "icons", icon+".png"))
with open(imageToMail, 'rb') as fp:
img = MIMEImage(fp.read())
img.add_header('Content-ID', '<{}>'.format(icon))
fp.close()
message.attach(img)
# Add text to message
msgText = MIMEText('<p>{}</p><br><br>'.format(strmsg), 'html')
message.attach(msgText)
# The actual mail send
server = smtplib.SMTP("smtp.gmail.com:587")
server.starttls()
server.login(from_email, email_passwd)
server.sendmail(from_email, to_email, str(message))
server.quit()
self.log("Email sent to {} .".format(to_email))
else:
self.log("No email sent since there were no conditions to get the umbrella.")
"""
Execute the code
"""
Umbrella = GetUmbrella()
| UTF-8 | Python | false | false | 6,520 | py | 3 | GetTheUmbrella.py | 1 | 0.549233 | 0.544479 | 0 | 180 | 35.222222 | 154 |
BCHoagland/Sutton-and-Barto | 10,368,051,072,749 | 2572ab07aa6f102f53623cb5acb476e62a25e12 | 49ef500e0921d114963d48844b353fd8de15c4eb | /08 - Planning and Learning with Tabular Methods/priority_sweeping/envs/shortcut_env.py | af357d6d1cc1933360f0883a3993eba5f389 | [] | no_license | https://github.com/BCHoagland/Sutton-and-Barto | d282a8b493ad45f8c3e444eec095cb7d26d558f2 | ec4a049820b6098462e9aadd43109fc6c83d0fc5 | refs/heads/master | 2020-04-17T02:36:38.925765 | 2019-03-08T18:48:57 | 2019-03-08T18:48:57 | 166,143,693 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
class ShortcutEnv():
def __init__(self):
self.name = 'Shortcut Maze'
self.width = 9
self.height = 6
self.start_pos = np.array([3, 0])
self.goal = np.array([self.width-1, self.height-1])
self.old_obstacles = [[1, 2], [2, 2], [3, 2], [4, 2], [5, 2], [6, 2], [7, 2], [8, 2]]
self.new_obstacles = [[1, 2], [2, 2], [3, 2], [4, 2], [5, 2], [6, 2], [7, 2]]
self.obstacles = self.old_obstacles
self.actions = [[0, 1], [1, 0], [0, -1], [-1, 0]]
self.actions = [np.array(a) for a in self.actions]
self.t = 0
def reset_t(self):
self.t = 0
self.obstacles = self.old_obstacles
def reset(self):
self.pos = self.start_pos
return self.pos
def at_goal(self):
return tuple(self.pos) == tuple(self.goal)
def step(self, a):
self.t += 1
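        # After 3000 steps the obstacle row changes, opening a shortcut
        # (the shortcut-maze experiment from Sutton & Barto, chapter 8)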
if self.t == 3000:
self.obstacles = self.new_obstacles
next_pos = np.clip(self.pos + self.actions[a], 0, [self.width-1, self.height-1])
if next_pos.tolist() not in self.obstacles:
self.pos = next_pos
if self.at_goal():
return self.pos, 1, True
return self.pos, 0, False
| UTF-8 | Python | false | false | 1,247 | py | 81 | shortcut_env.py | 48 | 0.510024 | 0.465116 | 0 | 42 | 28.690476 | 93 |
doolieSoft/djangobourse | 14,061,722,976,424 | 5dc4aef8fbee2148937583584107b0224e5d335a | 2eb06ad3b2cae9dc25d5332350a514b8b75d25af | /src/djangobourse/migrations/0012_auto_20190107_1745.py | 628d2c2a91f3ee1b25017ac586e684c11f848979 | [] | no_license | https://github.com/doolieSoft/djangobourse | 470c2d71a4f386a97b9c6f218f82f31d5384be7a | da921defaf6ef44ac621065a63b060cd79bfb1d6 | refs/heads/master | 2020-04-16T08:24:59.925043 | 2019-01-12T18:56:59 | 2019-01-12T18:56:59 | 165,424,276 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.1.5 on 2019-01-07 16:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('djangobourse', '0011_share_valeur'),
]
operations = [
migrations.AlterField(
model_name='share',
name='action',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='djangobourse.ActionSignaletique'),
),
migrations.AlterField(
model_name='share',
name='portefeuille',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='djangobourse.Portefeuille'),
),
migrations.AlterField(
model_name='transaction',
name='share',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='djangobourse.Share'),
),
]
| UTF-8 | Python | false | false | 940 | py | 21 | 0012_auto_20190107_1745.py | 18 | 0.623404 | 0.603191 | 0 | 29 | 31.413793 | 122 |
gayatribasude/GayatrisWorld | 3,556,232,949,513 | cae92af273f1a5efe31081ee9599e0c76cea9427 | 8d3835e39cbc2c74d8535b809686d6ab3033c0d0 | /ecommerce/carts/migrations/0029_auto_20210731_2132.py | 1da0307730c4529cce0432ee8019cff46ff0e1d6 | [] | no_license | https://github.com/gayatribasude/GayatrisWorld | 125698955cd8b98a5aa2377331293587a57f2911 | 552ea2ef946e95f5bccc4e51d4030484ab0bc438 | refs/heads/master | 2023-06-25T19:45:03.232059 | 2021-08-02T16:43:47 | 2021-08-02T16:43:47 | 384,343,617 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 2.1.3 on 2021-07-31 16:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carts', '0028_auto_20190130_2301'),
]
operations = [
migrations.AlterField(
model_name='cart',
name='items',
field=models.ManyToManyField(blank=True, to='carts.Item'),
),
]
| UTF-8 | Python | false | false | 401 | py | 93 | 0029_auto_20210731_2132.py | 66 | 0.588529 | 0.511222 | 0 | 18 | 21.277778 | 70 |
dekasthiti/pythonic | 5,686,536,735,973 | 51b9926d5b21da5b3d358082c09a0cbffd72b447 | d550cc0b4a8567d75da3d6e923533271309c6347 | /pythonic.py | f27ccfb99c9d65965b9dde19f3c0d9f9936579e5 | [] | no_license | https://github.com/dekasthiti/pythonic | 23349638caf4e70ca65b89b6b5a1f184580f2235 | f7d28ff787a27af828b46e4cc3bd52acfbf98f37 | refs/heads/master | 2023-06-14T08:50:46.108612 | 2021-07-11T00:45:10 | 2021-07-11T00:45:10 | 103,495,862 | 0 | 0 | null | false | 2021-07-11T00:24:42 | 2017-09-14T06:42:50 | 2021-07-10T22:59:02 | 2021-07-11T00:24:41 | 53 | 0 | 0 | 0 | Python | false | false |
from collections import defaultdict
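# Contrasts a verbose implementation with an idiomatic one; the module-level
# flags "pythonic" and "readable" (set under __main__ below) select which branch runs.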
import heapq
import functools
def compute_top_k_variance(students, scores, k):
"""
Students and scores are equal length lists of strings and floats, respectively.
The function computes for each string that appears at least k times in the list the variance of the top k scores
that correspond to it. Strings that appear fewer than k times are not considered.
"""
counts = {}
# Lot of boilerplate code is non-pythonic
if not pythonic:
# First, count the scores a student has
for i in range(len(students)):
if students[i] not in counts:
counts[students[i]] = 1
else:
counts[students[i]] += 1
# If a student has more than k scores, initialize an empty list for that student
all_scores = {}
for key in counts:
if counts[key] >= k:
all_scores[key] = []
# Accumulate the actual scores of the student
for i in range(len(students)):
if students[i] in all_scores:
all_scores[students[i]].append(scores[i])
# Now sort the scores of the student, and save the topk
top_k_scores = {}
for key in all_scores:
sorted_scores = sorted(all_scores[key])
top_k_scores[key] = []
for i in range(k):
top_k_scores[key].append(sorted_scores[len(sorted_scores) - 1 - i])
# Calculate variance
result = {}
for key in top_k_scores:
total = 0
for score in top_k_scores[key]:
total += score
mean = total / k
variance = 0
for score in top_k_scores[key]:
variance = variance + (score - mean) * (score - mean)
result[key] = variance
else:
# Using defaultdict with initialized list
all_scores = defaultdict(list)
for student, score in zip(students, scores):
all_scores[student].append(score)
# Dictionary comprehension with conditional
top_k_scores = {student: heapq.nlargest(k, scores) for student, scores in all_scores.items() if len(scores)
>= k}
# This is pythonic, but just not readable to me
if not readable:
result = {
student: functools.reduce(lambda variance, score: variance + (score - mean) ** 2, scores, 0)
for student, scores, mean in
(
(student, scores, sum(scores) / k)
for student, scores in top_k_scores.items()
)
}
# This is pythonic and readable to me
else:
result = defaultdict(float)
for student, scores in top_k_scores.items():
mean = sum(scores) / k
result[student] = functools.reduce(lambda variance, score: variance + (score - mean) ** 2, scores, 0)
return result
if __name__ == '__main__':
pythonic = True
readable = True
students = ['A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C']
scores = [10, 20, 30, 0, 5, 10, 30, 20, 30]
k = 2
result = compute_top_k_variance(students, scores, k)
    print(result)
| UTF-8 | Python | false | false | 3,359 | py | 53 | pythonic.py | 52 | 0.536171 | 0.528431 | 0 | 93 | 35.129032 | 117
crazyqipython/MLFHPY3 | 807,453,854,010 | 645afa56af0a6e1105e12100ebc282e04a4c5081 | 2c98108963be84092d097c1ee281c30b12ffa652 | /tests/test_ch4.py | efc5b51a7e7ed84ecb05ac86326bfc395f6ea556 | [] | no_license | https://github.com/crazyqipython/MLFHPY3 | db7606096e05c3dcaaf4d9cff35db9f05bfb9666 | 966d08f81382211c9f60c75518a2a0b36d035c66 | refs/heads/master | 2021-09-03T16:36:38.378393 | 2018-01-10T13:38:41 | 2018-01-10T13:38:41 | 115,197,470 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding:utf-8
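# Unit tests for the ch4 email-parsing helpers; collaborators are replaced
# with unittest.mock patches and temporary files.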
import os
import unittest
from unittest import mock
import shutil
import tempfile
from pandas import *
from pandas.util.testing import assert_frame_equal
from ch4 import easyham_path, parse_email_list, parse_email,\
get_header_and_message, get_date, get_sender, get_subject,\
make_email_df,train_test_split,thread_flags, clean_subject
import dateutil.parser as dtp
class TestParseEmailList(unittest.TestCase):
def test_eashham_pass(self):
self.assertEqual(easyham_path, "../data/easyham")
@mock.patch("ch4.parse_email", return_value=3)
def test_parse_email_list(self, mock):
result = parse_email_list(['a', 'b','c'])
self.assertEqual(list(result), [3,3,3])
class TestParseEmail(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
        self.path = os.path.join(self.dir, 'test.txt')
self.f = open(self.path, 'w')
self.f.write('aaaa\n\nThe owls are not what they seem')
self.f.close()
def tearDown(self):
shutil.rmtree(self.dir)
@mock.patch("ch4.get_subject", return_value='d')
@mock.patch("ch4.get_sender", return_value='c')
@mock.patch("ch4.get_date", return_value='b')
@mock.patch("ch4.get_header_and_message", return_value=['a','g'])
def test_parse_email(self, mock1, mock2, mock3, mock4):
result = parse_email(self.path)
self.assertEqual(result[:4],('b','c','d','g'))
def test_get_header_and_message(self):
result = get_header_and_message(self.path)
self.assertEqual((['aaaa\n'], ['The owls are not what they seem']), result)
def test_get_date(self):
result = get_date(["Date: 2017-01-12"])
self.assertEqual(result, dtp.parse('2017-01-12'))
def test_get_sender(self):
result = get_sender(["X-Egroups-From: Steve Burt <steve.burt@cursor-system.com> ", "From: Steve Burt <Steve_Burt@cursor-system.com>", "X-Yahoo-Profile: pyruse"])
self.assertEqual(result, "steve.burt@cursor-system.com")
def test_get_subject(self):
result = get_subject(["Subject: [IIU] Eircom aDSL Nat'ing"])
self.assertEqual(result, "[iiu] Eircom aDSL Nat'ing".lower())
class TestMakeEmailDf(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
self.df = DataFrame({},
columns=['date', 'sender', 'subject',
'message', 'filename'], index=[])
def test_make_email_df(self):
result = make_email_df(self.dir)
assert_frame_equal(result, self.df)
class TestTrainSplit(unittest.TestCase):
def setUp(self):
self.df = DataFrame({}, columns=['date', 'sender', 'subject',
'message', 'filename'])
def test_train_set_split(self):
result = train_test_split(self.df)
assert_frame_equal(result, self.df)
class TestThreadFlags(unittest.TestCase):
def test_thread_flags(self):
s = "re:asdfs"
result = thread_flags(s)
self.assertTrue(result)
def test_clean_subject(self):
s = "fw:aaa"
result = clean_subject(s)
        self.assertEqual(result, 'aaa')
| UTF-8 | Python | false | false | 3,291 | py | 16 | test_ch4.py | 14 | 0.606199 | 0.596475 | 0 | 94 | 33.031915 | 169
hikaru76/42tokyo | 11,029,476,031,300 | d7315613b627d5c064f784f2eca2eea1f6b475a4 | 481162d1e296ff8d7e48baacfadf4e43a0921cbc | /python-piscine/04/00/main.py | 113b5f7fc92221975733dac52d25363c2802b610 | [] | no_license | https://github.com/hikaru76/42tokyo | 1731a9dc03a77c8db5d2d8557b985f1a07b91daf | a2bcc870835edcd7a8488126024b082dfd2e2459 | refs/heads/master | 2023-06-04T08:30:02.194940 | 2021-06-28T12:18:37 | 2021-06-28T12:18:37 | 346,692,343 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
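# Minimal Pokemon wiki lookup: builds the fandom wiki URL from a name read on
# stdin and parses the page with BeautifulSoup.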
from urllib import request
import sys
from bs4 import BeautifulSoup
class PokeWiki:
def __init__(self, name: str):
self.name = name
self.url = "https://pokemon.fandom.com/wiki/" + name
response = request.urlopen(self.url)
self.soup = BeautifulSoup(response)
response.close()
def species(self):
return (self.soup.text)
def abilities(self):
return (self.name)
def main():
print("Which pokemon do you want to know?")
    name = sys.stdin.readline().strip()
poke = PokeWiki(name)
print(poke.species())
if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 628 | py | 20 | main.py | 9 | 0.619427 | 0.617834 | 0 | 27 | 22.296296 | 60
abhi8893/Intensive-python | 10,204,842,342,915 | 1bccc439c6508f7de6463a2480ed2b68ac1e81ae | b59189970730d2093e33d9c7ba33129baf2d683e | /tutorials/argparse-tutorial/greet_and_show_time.py | 93353d2dfdd4da8dee31f6a702ed281667120727 | [] | no_license | https://github.com/abhi8893/Intensive-python | be2c11a59c220ad02fd9f007f9ab5f85caaadbbc | e4b16926aa667760686ff111ecb71269c207acb4 | refs/heads/master | 2020-09-21T15:59:59.635512 | 2020-08-31T09:25:35 | 2020-08-31T09:25:35 | 224,839,629 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Link: https://t.ly/5882b
# Greet the person and show current time
import datetime as dt
def greet(name, show_time):
print(f'Hello {name}!')
if show_time:
print(f'The time right now is {dt.datetime.now()}')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Greet the user and show the time if requested.')
parser.add_argument(
'--name',
required=True)
# NOTE: dest is used for the namespace if it's provided.
# show_time seems like a more apt name for a logical.
# But the user would just like a simple -t or --time option.
parser.add_argument('-t', '--time', dest='show_time', action='store_true')
args = parser.parse_args()
name, show_time = args.name, args.show_time
    greet(name, show_time)
| UTF-8 | Python | false | false | 833 | py | 143 | greet_and_show_time.py | 126 | 0.627851 | 0.623049 | 0 | 29 | 27.758621 | 78
fenn/skdb | 7,524,782,746,531 | f5d2ae120b0c0c3770bfbf0968f1f41db2083369 | e74e2d3a8babb739b15c825bdfb14c7828af2344 | /inventory/fabmap/models.py | e73179f88e9917c1049bc244dd47d8994a0d7240 | [] | no_license | https://github.com/fenn/skdb | 97ea9a4cc584c4b24ea5275ac36cfdd3f34118de | dfb6ec81952030c2d0c1dec880eb93da1d629667 | refs/heads/master | 2020-05-07T21:55:52.979394 | 2014-11-15T12:33:28 | 2014-11-15T12:33:28 | 3,238,526 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django import forms
from django.forms import ModelForm
#
# Defining models
#
#
class AccessModel(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
def __unicode__(self):
return self.name
class EquipmentCapability(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class EquipmentType(models.Model):
name = models.CharField(max_length=200)
maker = models.CharField(max_length=200)
website = models.URLField(blank=True)
capabilities = models.ManyToManyField(EquipmentCapability)
def __unicode__(self):
return self.maker + " " + self.name
class Site(models.Model):
lat = models.FloatField("Latitude")
lon = models.FloatField("Longitude")
name = models.CharField("Site name", max_length=200)
locname = models.CharField("Town/city", max_length=200)
website = models.URLField(blank=True)
manager = models.ForeignKey(User)
access = models.ForeignKey(AccessModel, verbose_name="Access model")
def __unicode__(self):
return self.name
class Equipment(models.Model):
type = models.ForeignKey(EquipmentType)
site = models.ForeignKey(Site)
notes = models.TextField()
def __unicode__(self):
return self.type
# class Material(models.Model):
# # DEFINEME
# name = models.CharField(max_length=500)
# formula = models.CharField(max_length=200, blank=True)
# iselement = models.BooleanField()
# description = models.TextField(blank=True)
#
# meltingpoint = models.FloatField("Melting point", blank=True)
# boilingpoint = models.FloatField("Boiling point", blank=True)
#
# density = models.FloatField(blank=True)
#
# modulus_elasticity = models.FloatField("Modulus of elasticity", blank=True)
# modulus_youngs = models.FloatField("Young's modulus", blank=True)
#
# hardness_mohs = models.FloatField(blank=True)
# hardness_brinnel = models.FloatField(blank=True)
#
# thermal_inductance = models.FloatField(blank=True)
# thermal_capacity = models.FloatField(blank=True)
# thermal_expansion = models.FloatField("Coefficient of thermal expansion (CTE)", blank=True)
#
# electrical_inductance = models.FloatField(blank=True)
# electrical_capacity = models.FloatField(blank=True)
# # nuclear profile? radioactivity_profile?
# # electrical profile?
# # thermal profile?
#
#class MaterialProperty(models.Model):
# name = models.CharField(max_length=200)
#
#
#class Method(models.Model): # Process? Method? TODO: DECIDE ON TERMINOLOGY
# # DEFINEME
# pass
#
#
# Registering tables with admin site
#
#
admin.site.register(EquipmentType)
admin.site.register(Site)
admin.site.register(EquipmentCapability)
admin.site.register(Equipment)
admin.site.register(AccessModel)
#
# Defining forms
#
#
class SiteForm(ModelForm):
class Meta:
model = Site
        exclude = ('manager',)
| UTF-8 | Python | false | false | 3,363 | py | 174 | models.py | 82 | 0.643176 | 0.635147 | 0 | 112 | 29.026786 | 99 |
jeffzpy/py-sql-generator | 6,365,141,562,579 | f2c9121adb6743c38dd85c3da519539b9f5778fb | 330987aa81ea79768a96ab620110c877189093bd | /settings.py | 56f679f6c7323823537304d1058f0ae6b3404756 | [] | no_license | https://github.com/jeffzpy/py-sql-generator | 97998272c8f4c131aec8ccfd8b1992534c88a170 | ef8542d74f518df4b536bf7e7f0089fef8327fa9 | refs/heads/master | 2020-03-25T21:57:24.863269 | 2018-08-09T19:41:09 | 2018-08-09T19:41:09 | 144,197,244 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
SQLALCHEMY_DATABASE_URI = 'mysql://username:password@ip:port/database?charset=utf8'
| UTF-8 | Python | false | false | 83 | py | 1 | settings.py | 1 | 0.807229 | 0.795181 | 0 | 1 | 83 | 83
Knight-RS/CodeSmell | 901,943,150,095 | 9e5d2a614e5a3213b8ce798c0355c6186eb961aa | 3c5ef501cb8398b143aeeca015721d9da2e05f9f | /GodClassDetection/code/python/fine_tune_0716.py | c79eff3c5b1a973b143aeeca015721d9da2e05f9 | [] | no_license | https://github.com/Knight-RS/CodeSmell | 1e5e6996a02c11ba18e07ca11e85a1d6479ae7f3 | 1ad6b55604554372bcabfa236124f1ce7b47f1ae | refs/heads/master | 2021-05-16T21:57:31.764943 | 2020-09-11T03:23:29 | 2020-09-11T03:25:05 | 250,485,703 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding: utf-8 -*-
"""
@File : fine_tune.py.py
@Time : 2020-05-14 20:23
@Author : knight
"""
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import os
import time
import keras
import matplotlib.pyplot as plt
import preprocess1  # imports the custom preprocess.py, so preprocess.py is not the main module
from keras.models import load_model
from sklearn import metrics
from keras.models import model_from_json
from sklearn.model_selection import KFold
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
localtime = time.asctime(time.localtime(time.time()))
print("begin time", localtime)
# # Load the model architecture from a JSON file
# with open(r'/Users/knight/Desktop/GodClassDetection-master-mao-new/trained_model/pre_training_model.json', 'r') as file:
# model_json = file.read()
#
# # Load the model
# new_model = model_from_json(model_json)
# new_model.load_weights('model.json.h5')
# Load the pre-trained model
model_path = '/Users/knight/Desktop/GodClassDetection/trained_model/pre_training_model.h5'
# model_path = '/Users/knight/Desktop/GodClassDetection-master-mao-new/trained_model/pre_training_lstm_att_model.h5'
model = load_model(model_path)
# model.summary()
EMBEDDING_DIM = 200
MAX_SEQUENCE_LENGTH = 50
MAX_JACCARD_LENGTH = 30
INC_BATCH_SIZE = 80000
BASE_DIR = ''
# W2V_MODEL_DIR = '/Users/knight/Desktop/GodClassDetection/embedding_model/new_model6.bin'
W2V_MODEL_DIR = '/Users/knight/Desktop/GodClassDetection/embedding_model/new_model6_nltk.bin'
TRAIN_SET_DIR = '/Users/knight/Desktop/GodClassDetection/trainset'  # change this to your own path
FULL_MN_DIR = TRAIN_SET_DIR
tokenizer = preprocess1.get_tokenizer(FULL_MN_DIR)
all_word_index = tokenizer.word_index
embedding_matrix = preprocess1.get_embedding_matrix(all_word_index, W2V_MODEL_DIR, dim=EMBEDDING_DIM)
acc_list = []
loss_list = []
projects = []
x_k = []
y_k = []
x_train = []
y_train = []
x_test = []
y_test = []
f1_scores = []
data_path = '/Users/knight/Desktop/GodClassDetection/trainset/test'
for project in sorted(os.listdir(data_path)):
projects.append(project)
path = data_path + "/" + project
print("path:", path)
x, y = preprocess1.get_xy_train1(path, tokenizer=tokenizer, mn_maxlen=MAX_SEQUENCE_LENGTH,
embedding_matrix=embedding_matrix)
x_k.append(x)
y_k.append(y)
# print(len(x_k))
print('Fine tune model.')
# Fine-tuning
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
idx = 0
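# Leave-one-project-out cross-validation: each round holds one project out as
# the test set and fine-tunes on the remaining projects.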
for i in range(len(projects)):
for j in range(len(projects)):
if j == idx:
x_test.append(x_k[j])
y_test.append(y_k[j])
else:
x_train.append(x_k[j])
y_train.append(y_k[j])
idx = idx + 1
print("x_train:", len(x_train[0]))
print("y_train:", len(y_train))
print("x_test", len(x_test))
print("y_test:", len(y_test))
# print("x_train:", x_train)
# print("y_train:", y_train)
# print("x_test", x_test[0])
# print("y_test:", y_test[0])
# print("\n")
    # Training-set metrics
hist = model.fit(x_train, y_train, nb_epoch=10, batch_size=5, verbose=1)
score = model.evaluate(x_train, y_train, verbose=0)
print("训练集的一些参数:!!!!!!!!!!!!")
print('train mse:', score[0])
print('train accuracy:', score[1])
loss_list.append(score[0])
acc_list.append(score[1])
localtime = time.asctime(time.localtime(time.time()))
print("endding time", localtime)
avg_accuracy = sum(acc_list) / len(acc_list)
avg_loss = sum(loss_list) / len(loss_list)
print("average accuracy: ", avg_accuracy)
print("average loss: ", avg_loss)
    # Validation-set metrics
    print("\nMetrics on the validation set:")
print("project:", projects[i])
score = model.evaluate(x_test, y_test, verbose=0)
# print model.metrics_names
print('Test score:', score[0])
print('Test accuracy:', score[1])
# input()
predict_posibility = model.predict(x_test)
predict_label = []
godclass_indices = []
for i, item in enumerate(predict_posibility):
if item <= 0.5:
predict_label.append(0)
else:
godclass_indices.append(i)
predict_label.append(1)
labeld_godclasses = []
for i, item in enumerate(y_test):
if item == 1:
labeld_godclasses.append(i)
precision = metrics.precision_score(y_test, predict_label)
recall = metrics.recall_score(y_test, predict_label)
f1_score = metrics.f1_score(y_test, predict_label)
print('test precision:', precision)
print('test recall', recall)
print('test f1 score:', f1_score)
f1_scores.append(f1_score)
x_train = []
y_train = []
x_test = []
y_test = []
avg_f1 = sum(f1_scores) / len(f1_scores)
print("average f1: ", avg_f1)
localtime = time.asctime(time.localtime(time.time()))
print("end time", localtime)
# # Save the model architecture as a JSON file
# model_json = model.to_json()
# with open(r'/Users/knight/Desktop/GodClassDetection/trained_model/fine_tune_model.json', 'w') as file:
# file.write(model_json)
#
# # Save the fine-tuned model
# model.save('/Users/knight/Desktop/GodClassDetection/trained_model/fine_tune_model.h5')
#
# model.summary()
| UTF-8 | Python | false | false | 5,270 | py | 143 | fine_tune_0716.py | 16 | 0.648236 | 0.633571 | 0 | 173 | 28.156069 | 122 |
summychou/Py2Ja | 16,234,976,394,803 | d3bd64c0878e9f4f41901a33feafd98677961ae8 | 2c82fcf5af5de3482bf448f62ca3b3c62c6b2627 | /Py2Ja/blog/urls.py | a6637ec35849b4269467532eb080a614082151d7 | ["Apache-2.0"] | permissive | https://github.com/summychou/Py2Ja | c91d2f4bceaef7e84e295c650ffaa099a89fb1e0 | 99fda288f74b7a8761ac3d4ed0ce4440b976918d | refs/heads/master | 2016-09-13T05:59:43.385757 | 2016-05-05T13:15:53 | 2016-05-05T13:15:53 | 58,128,764 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import url
urlpatterns = [
url(r'^list', "blog.views.list_blogs", name="blog_list"),
url(r'^view/(?P<blog_id>\d+)', "blog.views.view_blog", name="blog_view"),
    url(r'^write', "blog.views.write_blog", name="blog_write"),
url(r'^write/image-upload/', "blog.views.upload_image", name="blog_write"),
]
| UTF-8 | Python | false | false | 400 | py | 73 | urls.py | 40 | 0.635 | 0.635 | 0 | 9 | 43.444444 | 79 |
Bsoong/CS-115 | 15,307,263,472,623 | 368151ce70e7e80222cb4a4f0761043a756b85756 | 30d826d0281ee214e6fb8fdf36ebbaa6b2cb7b2c | /Hw files/hw1.py | 36b85756368151ce70e7e80222cb4a4f07610437 | [] | no_license | https://github.com/Bsoong/CS-115 | 66717a5bc29437f004db3bad3899938293bc71c8 | b0f3c8d309214c7f483bcfd90ebd454bcbb5948f | refs/heads/master | 2020-09-21T15:59:59.635512 | 2018-11-21T06:14:40 | 2018-11-21T06:14:40 | 142,218,694 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Sep 10, 2017
@author: brand
'''
from cs115 import map, reduce
def mult(x,y):
return x*y
def factorial(n):
"""Using an if and an else statement, it allows for the function to hit all parameters or any
number that may be put in. The if makes sure the statement is possible and the the else multiplies
by itself to create the value of n!"""
return reduce(mult, range(1,n+1))
def add(a,b):
"""Simple add function to add two numbers together"""
return a+b
def mean(x):
"""Mean is the sum of the numbers/the amount, so using reduce and the add function youre able to add
x value and then divide it by the list"""
list= len(x)
return ((reduce(add, x)/list))
list=[]
def prime(n):
if n==1:
return True
return factorial(n-1)%n==n-1
print(prime(1))
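# The test above relies on Wilson's theorem: n > 1 is prime exactly when
# (n-1)! % n == n-1, i.e. (n-1)! is congruent to -1 modulo n. (The special
# case above treats 1 as prime, which the theorem itself does not.)
# A quick worked check of the idea, using the functions defined above:
#   factorial(6) % 7 == 720 % 7  == 6 == 7 - 1, so prime(7) is True,
#   factorial(7) % 8 == 5040 % 8 == 0 != 7,     so prime(8) is False.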
| UTF-8 | Python | false | false | 910 | py | 34 | hw1.py | 32 | 0.595604 | 0.579121 | 0 | 34 | 23.705882 | 104 |
Manchangdx/daxiangmu | 18,751,827,248,238 | 6d542eec9afc583f5de0cec48f092802fa0a2e9e | 5fc6b9810d3ef030345246a3030dc2bf7809da04 | /jobplus/jobplus/handlers/admin.py | 7a5bfb802749bbd7b58ceb88912613b287aa3be1 | []
| no_license | https://github.com/Manchangdx/daxiangmu | f8d828929a2e9e4b05c63e38a877474c204e61c0 | 9882643a923b1d87068fe68155e61a46eade7922 | refs/heads/master | 2023-05-25T11:26:49.134919 | 2022-04-04T13:06:38 | 2022-04-04T13:06:38 | 131,313,758 | 0 | 0 | null | false | 2023-05-01T22:24:34 | 2018-04-27T15:25:35 | 2022-03-07T11:12:06 | 2023-05-01T22:24:33 | 422 | 1 | 0 | 4 | Python | false | false | from flask import (Blueprint, render_template, request, current_app,
redirect, url_for, flash)
from jobplus.decorators import admin_required
from jobplus.models import User, db, Job
admin = Blueprint('admin', __name__, url_prefix='/admin')
@admin.route('/')
@admin_required
def index():
return render_template('admin/index.html')
@admin.route('/user')
@admin_required
def user():
page = request.args.get('page', default=1, type=int)
pagination = User.query.paginate(
page=page,
per_page=current_app.config['ADMIN_PER_PAGE'],
error_out=False
)
return render_template('admin/users.html', pagination=pagination)
@admin.route('/users/<int:user_id>/disable')
@admin_required
def disable_user(user_id):
user = User.query.get_or_404(user_id)
if user.is_disable:
user.is_disable = False
        flash('This user has been enabled', 'success')
else:
user.is_disable = True
        flash('This user has been disabled', 'info')
db.session.add(user)
db.session.commit()
return redirect(url_for('.user'))
@admin.route('/job')
@admin_required
def job():
page = request.args.get('page', default=1, type=int)
pagination = Job.query.paginate(
page=page,
per_page=current_app.config['ADMIN_PER_PAGE'],
error_out=False
)
return render_template('admin/jobs.html', pagination=pagination)
| UTF-8 | Python | false | false | 1,387 | py | 36 | admin.py | 15 | 0.655172 | 0.651504 | 0 | 51 | 25.72549 | 69 |
developeryuldashev/python-core | 3,135,326,151,161 | d5e0c1d90875b85c98cb9afac5d0304f2397dd36 | 47deebe6fefedb01fdce5d4e82f58bb08f8e1e92 | /python core/Lesson_13/file_1.py | 1148a464babb7ffd09f12e200c2a2ee0a66610df | []
| no_license | https://github.com/developeryuldashev/python-core | 5bb162603bdb5782acf05e3fb25ca5dd6347067a | 08fca77c9cfde69d93a7875b3fb65b98f3dabd78 | refs/heads/main | 2023-08-21T03:33:12.160133 | 2021-10-19T04:56:53 | 2021-10-19T04:56:53 | 393,383,696 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | exept='/\:|=?;[],^<>'
path='file1?.txt'
r=True
for char in path:
r=r and (char not in exept)
if r:
f=open(path,'w')
f.close()
else:
print('bu nom bilan fayl yaratib bo\'lmaydi') | UTF-8 | Python | false | false | 193 | py | 426 | file_1.py | 411 | 0.57513 | 0.569948 | 0 | 10 | 18.4 | 49 |
nileshhadalgi016/SQLite3-Tutorial | 11,132,555,252,549 | 88d0ced41a8f176a8a12bba6406b4162ea6dfc52 | 0482dec67616fc6a74095d9c248ba33ae28fcc96 | /superMarketApp/database.py | 685dc44b4c24623f5da15259e71f231cd4e95e95 | []
| no_license | https://github.com/nileshhadalgi016/SQLite3-Tutorial | 741289230d1a4492cabc7a1621c566d6c8c57f79 | 35719f92276667d357d013740a8735f852af5b6c | refs/heads/main | 2023-02-18T16:31:42.244311 | 2021-01-20T11:32:51 | 2021-01-20T11:32:51 | 326,337,046 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
# cur.execute('CREATE TABLE admin(username TEXT,password TEXT)')
# conn.commit()
# cur.execute("INSERT INTO admin VALUES('nilesh','nilesh')")
# conn.commit()
def verif_admin(username, password):
try:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
print(username)
print(password)
data = cur.execute('SELECT password FROM admin WHERE username = "{}"'.format(username)).fetchall()[0][0]
conn.close()
if password == data:
return True
else:
return False
except:
return False
def add_product(id_, name, quantity, cost):
if id_ == '' and name == '' and quantity == '' and cost == '':
return False, " You Cannot Leave It Empty "
try:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
print(id_, name, quantity, cost)
try:
quantity = int(quantity)
cost = int(cost)
print(id_, name, quantity, cost)
print(type(id_), type(name), type(quantity), type(cost))
check = cur.execute(f"SELECT * FROM products WHERE id = '{id_}'").fetchall()
if len(check) > 0:
return False, " This Product Already Exist Try Updating "
else:
cur.execute('INSERT INTO products VALUES("{}","{}",{},{})'.format(id_, name, quantity, cost))
conn.commit()
conn.close()
return True, " Product Added Successfully "
except:
return False, " Quantity and Cost are Integers "
except:
return False, " Failed Connecting Database "
def get_product_detail(prod_id):
if prod_id == '':
return False, " Enter Product Id "
else:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
data = cur.execute(f"SELECT rowid,* FROM products where id='{prod_id}'").fetchall()
conn.close()
if len(data) == 0:
return False, " Product Don't Exist "
return True, data
def update_delete_product(rowid, id_, name, quantity, cost, qry):
if id_ == '' and name == '' and quantity == '' and cost == '':
return False, " You Cannot Leave It Empty "
try:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
try:
quantity = int(quantity)
cost = int(cost)
if qry == 'update':
cur.execute(
f"UPDATE products SET id = '{id_}',name='{name}',quantity = {quantity},cost={cost} WHERE rowid = {rowid}")
conn.commit()
return True, " Product Updated Successfully "
if qry == "delete":
cur.execute(f"DELETE FROM products WHERE rowid={rowid} ")
conn.commit()
return True, " Product Deleted Successfully "
conn.commit()
conn.close()
except:
return False, " Quantity and Cost are Integers "
except:
return False, " Failed Connecting Database "
def showProducts_all():
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
data = cur.execute("SELECT * FROM products").fetchall()
return True, data
def added_to_cart(prod_id, qry):
if prod_id == '':
return False, " Please Enter Product Id ",1
else:
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
if qry == "add":
try:
cur.execute("""CREATE TABLE cart(
id TEXT,
name TEXT,
quantity INTEGER,
cost INTEGER) """)
except:
pass
data = cur.execute(f"""SELECT * FROM products WHERE id = '{prod_id}'""").fetchall()
cart_check = cur.execute(f"""SELECT * FROM cart WHERE id = '{prod_id}' """).fetchall()
if len(cart_check) == 0:
cur.execute(f"""INSERT INTO cart VALUES('{data[0][0]}','{data[0][1]}',1,{data[0][3]})""")
conn.commit()
cur.execute(f"""UPDATE products SET quantity = {(data[0][2] - 1)} WHERE id ='{prod_id}'""")
conn.commit()
all_prods = cur.execute("SELECT * FROM cart").fetchall()
return True, " Product Added To Cart Successfully ",all_prods
elif len(cart_check) > 0:
cur.execute(
f"""UPDATE cart SET quantity = {(cart_check[0][2] + 1)},cost={(cart_check[0][3] + data[0][3])} WHERE id ='{prod_id}'""")
conn.commit()
cur.execute(f"""UPDATE products SET quantity = {(data[0][2] - 1)} WHERE id ='{prod_id}'""")
conn.commit()
all_prods = cur.execute("SELECT * FROM cart").fetchall()
return True, " Product Added To Cart Successfully ",all_prods
if qry == "remove":
cart_check = cur.execute(f"""SELECT * FROM cart WHERE id = '{prod_id}' """).fetchall()
if len(cart_check) == 0:
all_prods = cur.execute("SELECT * FROM cart").fetchall()
return True," Product Doesn't Exist ",all_prods
elif len(cart_check) > 0:
data = cur.execute(f"""SELECT * FROM products WHERE id = '{prod_id}'""").fetchall()
cur.execute(f"UPDATE products SET quantity = {(data[0][2]+cart_check[0][2])} WHERE id ='{prod_id}'")
conn.commit()
cur.execute(f"DELETE FROM cart WHERE id = '{prod_id}'")
conn.commit()
all_prods = cur.execute("SELECT * FROM cart").fetchall()
return True," Product Deleted Successfully ",all_prods
conn.close()
def get_cost():
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
data = cur.execute("SELECT * FROM cart").fetchall()
cost = 0
for i in data:
cost = cost+i[3]
return cost
def done_Drp():
conn = sqlite3.connect('SuperMarket.db')
cur = conn.cursor()
cur.execute("DROP TABLE cart")
conn.commit()
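
# --- Illustrative usage (an addition, not part of the original module) ---
# A minimal sketch of how the helpers above chain together. The credentials
# match the 'nilesh' admin user seeded in the commented setup at the top of
# this file; the product id and values are made-up placeholders, and
# SuperMarket.db must already contain the admin and products tables.
if __name__ == '__main__':
    print('admin ok:', verif_admin('nilesh', 'nilesh'))
    ok, msg = add_product('P001', 'Sugar 1kg', '10', '45')
    print(msg)
    ok, detail = get_product_detail('P001')
    print(detail)                                   # rows on success, an error message otherwise
    ok, msg, cart = added_to_cart('P001', 'add')    # creates the cart table on first use
    print(msg, '| cart total:', get_cost())
    done_Drp()                                      # drop the cart once the sale is rung up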
| UTF-8 | Python | false | false | 6,199 | py | 3 | database.py | 3 | 0.524439 | 0.517342 | 0 | 171 | 35.245614 | 140 |
andy90/ShortSolventModel | 18,640,158,093,324 | 3e157d839ab449c496ccf5a39a776b1e3e268cff | 7cdf32b4ac717fe30eac1b14106d6ab1730adb2f | /data_ions/Na_Ca_Cl_potentials/plot_compare_Cl_2Cl.py | c558116b68681195091b4d78e6ccad2386889434 | []
| no_license | https://github.com/andy90/ShortSolventModel | 7d64cfef3f41c2e266cf6a3ec87974784a248fe4 | daff3ace5f5e4b24da20a385374baa11ea789d1e | refs/heads/master | 2020-09-23T10:03:44.959876 | 2020-08-18T20:17:59 | 2020-08-18T20:17:59 | 225,467,170 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # compare the pmf Ca Cl with one or 2 Cl in the box
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp
from matplotlib import rc, font_manager
import scipy.integrate as integrate
pmf1Cl=np.loadtxt("pmf_CaCl.xvg",unpack=True)
pmf2Cl=np.loadtxt("pmf_CaCl_2Cl.xvg",unpack=True)
pmf1=interp.InterpolatedUnivariateSpline(pmf1Cl[0],pmf1Cl[1])
pmf2=interp.InterpolatedUnivariateSpline(pmf2Cl[0],pmf2Cl[1])
kT=300.0/120.0
plt.rc('text', usetex=True)
plt.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman']})
r=np.linspace(0.26,1.25,100)
plt.figure()
ax1 = plt.subplot(111)
ax1.plot(r,np.exp(-(pmf1(r)-pmf1(1.25))),color="black",lw=3,label=r"$g_{\mathrm{CaCl}}(r) $")
ax1.plot(r,np.exp(-(pmf2(r)-pmf2(1.25))),color="blue",lw=3,label=r"$g_{\mathrm{CaCl}}(r) $, 2 Cl")
legend=ax1.legend(bbox_to_anchor=(1, 1),prop={'size':20})
frame=legend.get_frame()
ax1.tick_params(axis='both', which='major',length=5, pad=12,top='off', right='off')
plt.xticks( fontsize = 20)
plt.yticks( fontsize = 20)
plt.xlabel(r"$r$(nm)",fontsize=25)
plt.xlim(0.2,1.25)
plt.ylim(0,7)
plt.gcf().subplots_adjust(bottom=0.15)
plt.savefig("comp_2Cl.pdf")
plt.show() | UTF-8 | Python | false | false | 1,182 | py | 39 | plot_compare_Cl_2Cl.py | 7 | 0.70643 | 0.642132 | 0 | 38 | 30.131579 | 98 |
JonAWhite/BitcoinBudget | 15,264,313,817,148 | cbb8787c3d8219bf7a4e2f98d2c265fb6376eed1 | 58c73027252736b1ee236d329cc6f82e8dd71d8d | /coinbase_execute_budget.py | 5b6c502eee1865fadcc8e63c8bfcb9c5b5caeacd | [
"MIT"
]
| permissive | https://github.com/JonAWhite/BitcoinBudget | 912f0a77416a95063176b7e5c337597942d7746a | cb600ca047bf5aa79651515382c9ba6ae4d84c53 | refs/heads/master | 2021-01-19T09:27:12.674106 | 2017-02-16T09:54:35 | 2017-02-16T09:54:35 | 82,111,619 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from coinbase.wallet.client import Client
import argparse
import json
def find_account(accounts, name):
for x in range(len(accounts)):
account = accounts[x]
if account.name == name:
return account
parser = argparse.ArgumentParser()
parser.add_argument("budget_file", help='JSON file in the form [{"name":"Budget Item 1", "amount_usd":5.50}]')
parser.add_argument("bitcoin_paid_price", help='The price you paid for the coins')
parser.add_argument("coinbase_api_key", help='Get this from coinbase.com')
parser.add_argument("coinbase_api_secret", help='Get this from coinbase.com')
args = parser.parse_args()
with open(args.budget_file) as data_file:
budget_accounts = json.load(data_file)
client = Client(args.coinbase_api_key, args.coinbase_api_secret)
primary_account = client.get_primary_account()
bitcoin_spot_price_in_usd = client.get_spot_price(currency_pair = 'BTC-USD')["amount"]
bitcoin_paid_price_in_usd = args.bitcoin_paid_price
accounts_obj = client.get_accounts(limit="100")
assert (accounts_obj.pagination is None) or isinstance(accounts_obj.pagination, dict)
accounts = accounts_obj[::]
total_usd = 0
for budget_account in budget_accounts:
total_usd += budget_account["amount_usd"]
total_btc = 0
for budget_account in budget_accounts:
budget_account_name = budget_account["name"]
budget_account_id = find_account(accounts, budget_account_name).id
budget_account_amount_usd = budget_account["amount_usd"]
budget_account_amount_btc = float("{0:.8f}".format(budget_account_amount_usd / float(bitcoin_paid_price_in_usd)))
total_btc += budget_account_amount_btc
    print 'Transferring ' + str(budget_account_amount_btc) + ' BTC from ' + primary_account.name + ' (' + primary_account.id + ') to ' + budget_account_name + ' (' + budget_account_id + ')'
#client.transfer_money(primary_account.id, to=budget_account_id, amount=str(budget_account_amount_btc), currency="BTC")
print 'BTC-USD Spot Price: ' + str(bitcoin_spot_price_in_usd)
print 'BTC-USD Paid Price: ' + bitcoin_paid_price_in_usd
print 'Budget Total: $' + str("%.2f" % total_usd)
print 'Budget Total: ' + str("%.8f" % total_btc) + ' BTC'
| UTF-8 | Python | false | false | 2,181 | py | 3 | coinbase_execute_budget.py | 3 | 0.709766 | 0.703806 | 0 | 46 | 46.391304 | 189 |
misrayazgan/METU-CENG-Coursework | 12,567,074,348,966 | 35d16b276b6ecfe071cdf4e22ebdd9ad86a12c79 | fa36955459af1fe1db712aeaf17e5cc0051886f0 | /Ceng483 - Introduction to Computer Vision/THE3/the3.py | 81f1ad179ddb0aa6e5eea25206d0fa3311ce18fd | []
| no_license | https://github.com/misrayazgan/METU-CENG-Coursework | e97ab42f48f517bbc190736c3d100601a2d2e592 | bf7e429b620db2e71b6c127a26eb7a01a52bb251 | refs/heads/master | 2022-11-24T21:55:37.334017 | 2020-07-19T16:48:00 | 2020-07-19T16:48:00 | 180,148,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Feel free to change / extend / adapt this source code as needed to complete the homework, based on its requirements.
# This code is given as a starting point.
#
# REFEFERENCES
# The code is partly adapted from pytorch tutorials, including https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
# ---- hyper-parameters ----
# You should tune these hyper-parameters using:
# (i) your reasoning and observations,
# (ii) by tuning it on the validation set, using the techniques discussed in class.
# You definitely can add more hyper-parameters here.
batch_size = 16
max_num_epoch = 100
hps = {'lr': 0.01, 'n_conv_layers': 2, 'kernel_size': 3, 'n_kernels': 8}
# n_conv_layers: 1,2,4
# kernel_size: 3,5
# n_kernels: 2,4,8
# learning_rate: between 0.0001 and 0.1
# ---- options ----
DEVICE_ID = 'cpu' # set to 'cpu' for cpu, 'cuda' / 'cuda:0' or similar for gpu.
LOG_DIR = 'checkpoints'
VISUALIZE = False # set True to visualize input, prediction and the output from the last batch
LOAD_CHKPT = False
# --- imports ---
import torch
import os
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import hw3utils
from utils import read_image
torch.multiprocessing.set_start_method('spawn', force=True)
# ---- utility functions -----
def get_loaders(batch_size,device):
data_root = 'ceng483-s19-hw3-dataset'
train_set = hw3utils.HW3ImageFolder(root=os.path.join(data_root,'train'),device=device)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0)
val_set = hw3utils.HW3ImageFolder(root=os.path.join(data_root,'val'),device=device)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=0)
test_set = hw3utils.HW3ImageFolder(root=os.path.join(data_root, 'test'), device=device)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=0)
return train_loader, val_loader, test_loader
# ---- ConvNet -----
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.n_conv_layers = hps['n_conv_layers']
self.n_kernels = hps['n_kernels']
self.kernel_size = hps['kernel_size']
self.padding = (self.kernel_size - 1) // 2
# nn.conv2d(in_channels, out_channels, kernel_size, stride, padding, ...)
self.four_layers = nn.Sequential(
nn.Conv2d(1, self.n_kernels, self.kernel_size, padding=(self.padding, self.padding)),
#nn.BatchNorm2d(self.n_kernels),
nn.ReLU(),
nn.Conv2d(self.n_kernels, self.n_kernels, self.kernel_size, padding=(self.padding, self.padding)),
#nn.BatchNorm2d(self.n_kernels),
nn.ReLU(),
nn.Conv2d(self.n_kernels, self.n_kernels, self.kernel_size, padding=(self.padding, self.padding)),
#nn.BatchNorm2d(self.n_kernels),
nn.ReLU(),
nn.Conv2d(self.n_kernels, 3, self.kernel_size, padding=(self.padding, self.padding)))
self.two_layers = nn.Sequential(
nn.Conv2d(1, self.n_kernels, self.kernel_size, padding=(self.padding, self.padding)),
#nn.BatchNorm2d(self.n_kernels),
nn.ReLU(),
nn.Conv2d(self.n_kernels, 3, self.kernel_size, padding=(self.padding, self.padding)))
self.one_layer = nn.Sequential(
nn.Conv2d(1, 3, self.kernel_size, padding=(self.padding, self.padding)))
def forward(self, grayscale_image):
# apply your network's layers in the following lines:
if self.n_conv_layers == 4:
x = self.four_layers(grayscale_image)
elif self.n_conv_layers == 2:
x = self.two_layers(grayscale_image)
elif self.n_conv_layers == 1:
x = self.one_layer(grayscale_image)
return x
# ---- training code -----
device = torch.device(DEVICE_ID)
#print('device: ' + str(device))
net = Net().to(device=device)
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=hps['lr'])
train_loader, val_loader, test_loader = get_loaders(batch_size,device)
if LOAD_CHKPT:
#print('loading the model from the checkpoint')
    net.load_state_dict(torch.load(os.path.join(LOG_DIR, 'checkpoint.pt')))
prev_val_loss = float("inf")
optimal_epoch = 0
#print('training begins')
for epoch in range(max_num_epoch):
running_loss = 0.0 # training loss of the network
n_training = 0
for iteri, data in enumerate(train_loader, 0):
n_training += 1
#print(" train iteri:", iteri)
inputs, targets = data # inputs: low-resolution images(grayscale), targets: high-resolution images(rgb).
optimizer.zero_grad() # zero the parameter gradients
# do forward, backward, SGD step
preds = net(inputs) # get train outputs
loss = criterion(preds, targets) # get loss for each mini-batch(16 images)
loss.backward()
optimizer.step()
# print loss
running_loss += loss.item()
print_n = 100 # feel free to change this constant
if iteri % print_n == (print_n-1): # print every print_n mini-batches(16 images)
print('[%d, %5d] network-loss: %.3f' %
(epoch + 1, iteri + 1, running_loss / 100))
running_loss = 0.0
if (iteri==0) and VISUALIZE:
hw3utils.visualize_batch(inputs,preds,targets)
#print(epoch + 1, running_loss / n_training)
'''acc = 0
with torch.no_grad():
for i, val_data in enumerate(val_loader, 0):
val_inputs, val_targets = val_data
val_preds = net(val_inputs)
for j, pred in enumerate(val_preds):
pred = pred.to(torch.float64)
val_target = val_targets[j]
val_target = val_target.to(torch.float64)
colored_target = (val_target/2 + 0.5) * 255
colored_target = colored_target.permute(1,2,0)
colored_pred = (pred/2 + 0.5) * 255 # First take to range [0, 1], then to [0, 255]
            colored_pred = colored_pred.permute(1,2,0) # Convert to 80x80x3
#print(colored_pred.shape, colored_target.shape)
est = colored_pred.cpu().numpy().astype(np.int64)
cur = colored_target.cpu().numpy().astype(np.int64)
cur_acc = (np.abs(cur - est) < 12).sum() / cur.shape[0]
acc += cur_acc
acc /= 5001
print(epoch + 1, acc)'''
if epoch % 5 == 4:
# Compute average validation loss every 5 epochs by a full pass over the validation set.
val_running_loss = 0.0
n_validation = 0
for i, val_data in enumerate(val_loader, 0):
n_validation += 1
val_inputs, val_targets = val_data
val_preds = net(val_inputs) # get validation outputs
val_loss = criterion(val_preds, val_targets) # get loss for each mini-batch(16 images)
val_running_loss += val_loss.item()
# print("Epoch", epoch + 1, "is over. Validation set loss:", val_running_loss / 125)
# print(epoch + 1, val_running_loss / n_validation)
# If loss has increased, apply early stopping.
if prev_val_loss < val_running_loss:
optimal_epoch = epoch - 5
# print("optimal number of epochs is:", optimal_epoch)
# print("prev_val_loss is:", prev_val_loss / 125)
# print("current loss is:", val_running_loss / 125)
break
else:
# If current loss < prev loss, then save the model
#print('Saving the model, end of epoch %d' % (epoch+1))
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
torch.save(net.state_dict(), os.path.join(LOG_DIR,'checkpoint.pt'))
hw3utils.visualize_batch(inputs,preds,targets,os.path.join(LOG_DIR,'example.png'))
prev_val_loss = val_running_loss
#print('Finished Training')
# number of validation images: 2000, size: 80x80x3
# number of test images: 2000, size: 80x80x3
validation_estimations = np.zeros((2000, 80, 80, 3))
test_estimations = np.zeros((2000, 80, 80, 3))
# One full pass over the validation set
with torch.no_grad(): # Run model without backpropagation
for i, data in enumerate(val_loader):
inputs, targets = data
preds = net(inputs)
for j, pred in enumerate(preds):
pred = pred.to(torch.float64)
colored_pred = (pred/2 + 0.5) * 255 # First take to range [0, 1], then to [0, 255]
            colored_pred = colored_pred.permute(1,2,0) # Convert to 80x80x3
validation_estimations[i * batch_size + j] = colored_pred.cpu().numpy()
print("val est", validation_estimations)
# One full pass over the test set
with torch.no_grad(): # Run model without backpropagation
for i, data in enumerate(test_loader):
inputs, targets = data
preds = net(inputs)
for j, pred in enumerate(preds):
pred = pred.to(torch.float64)
colored_pred = (pred/2 + 0.5) * 255 # First take to range [0, 1], then to [0, 255]
            colored_pred = colored_pred.permute(1,2,0) # Convert to 80x80x3
test_estimations[i * batch_size + j] = colored_pred.cpu().numpy()
print("test shape:", colored_pred.shape)
np.save("estimations_validation.npy", validation_estimations)
np.save("estimations_test.npy", test_estimations)
| UTF-8 | Python | false | false | 9,878 | py | 46 | the3.py | 24 | 0.603563 | 0.578761 | 0 | 228 | 42.324561 | 129 |
cohesity/management-sdk-python | 5,342,939,352,212 | c0ddf5ca0ef1bc8e4b2a8c34e87b8fa2fba8a98b | 09f8a3825c5109a6cec94ae34ea17d9ace66f381 | /cohesity_management_sdk/models/amqp_target_config.py | 1ff3075ffe529ed43c43a56b8b638aa82f875310 | [
"Apache-2.0"
]
| permissive | https://github.com/cohesity/management-sdk-python | 103ee07b2f047da69d7b1edfae39d218295d1747 | e4973dfeb836266904d0369ea845513c7acf261e | refs/heads/master | 2023-08-04T06:30:37.551358 | 2023-07-19T12:02:12 | 2023-07-19T12:02:12 | 134,367,879 | 24 | 20 | Apache-2.0 | false | 2023-08-31T04:37:28 | 2018-05-22T06:04:19 | 2023-08-28T20:41:21 | 2023-08-31T04:37:26 | 55,712 | 20 | 18 | 5 | Python | false | false | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
class AMQPTargetConfig(object):
"""Implementation of the 'AMQPTargetConfig' model.
TODO: type description here.
Attributes:
certificate (string): Specifies the certificate.
exchange (string): Specifies the exchange.
filer_id (long|int): Specifies the filer id.
password (string): Specifies the password.
server_ip (string): Specifies the server ip.
username (string): Specifies the username.
virtual_host (string): Specifies the virtual host.
"""
# Create a mapping from Model property names to API property names
_names = {
"certificate":'certificate',
"exchange":'exchange',
"filer_id":'filerId',
"password":'password',
"server_ip":'serverIp',
"username":'username',
"virtual_host":'virtualHost',
}
def __init__(self,
certificate=None,
exchange=None,
filer_id=None,
password=None,
server_ip=None,
username=None,
virtual_host=None,
):
"""Constructor for the AMQPTargetConfig class"""
# Initialize members of the class
self.certificate = certificate
self.exchange = exchange
self.filer_id = filer_id
self.password = password
self.server_ip = server_ip
self.username = username
self.virtual_host = virtual_host
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
certificate = dictionary.get('certificate')
exchange = dictionary.get('exchange')
filer_id = dictionary.get('filerId')
password = dictionary.get('password')
server_ip = dictionary.get('serverIp')
username = dictionary.get('username')
virtual_host = dictionary.get('virtualHost')
# Return an object of this model
return cls(
certificate,
exchange,
filer_id,
password,
server_ip,
username,
virtual_host
) | UTF-8 | Python | false | false | 2,660 | py | 1,430 | amqp_target_config.py | 1,405 | 0.577068 | 0.575188 | 0 | 89 | 28.898876 | 81 |
pcuzner/gluster-deploy | 11,897,059,433,362 | 80e181cb2efe3aeff387bf9d4af382f3e3301b18 | 4c36e33ee5577416146622eb436d9a0938011d53 | /functions/config.py | e93be51f8e0f64753b99cfb6c8c17c43277b8087 | []
| no_license | https://github.com/pcuzner/gluster-deploy | 1167ed26d838ef559d26ee5ac64867d1a35a6134 | 151a8b62c742a23f952b792561616f00f007dc43 | refs/heads/master | 2021-01-21T21:55:04.201990 | 2016-03-30T23:12:11 | 2016-03-30T23:12:11 | 35,012,116 | 5 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# config.py
#
# Copyright 2013 Paul Cuzner <paul.cuzner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import logging
import os
import sys
def init():
""" Initialise global variables """
global LOGFILE
global LOGLEVEL
global ACCESSKEY
global LOGGER
global SVCPORT
global HTTPPORT
global NICPREFIX
global PGMROOT
global MSGSTACK
global BTRFSKERNEL
global SERVERLIST
global BRICKPATH
global VGNAME
global LVNAME
global CLUSTER
global STRIPEUNIT
global STRIPEWIDTH
global SNAPSHOTVERSION
# import is placed inside the function to prevent circular imports
from functions.utils import MsgStack
from functions.gluster import Cluster
SNAPSHOTVERSION = '3.6'
LOGFILE = 'gluster-deploy.log'
LOGLEVEL = logging.getLevelName('DEBUG') # DEBUG | INFO | ERROR
logging.basicConfig(filename=LOGFILE,
level=LOGLEVEL,
filemode='w')
LOGGER = logging.getLogger()
# NIC types that would be presented to the admin for subnet
# selection
NICPREFIX = ('eth', 'bond', 'em','virbr0','ovirtmgmt','rhevm')
# TCP port for glusterd
SVCPORT = 24007
# Default port for the web UI
HTTPPORT = 8080
# create a msgstack object used to track task progress
MSGSTACK = MsgStack()
# Minimum kernel version required to support btrfs filesystem bricks
BTRFSKERNEL = '3.6'
PGMROOT = os.path.split(os.path.abspath(os.path.realpath(sys.argv[0])))[0]
# List of servers specified through the config file (deploy.cfg)
SERVERLIST = []
# default path for the gluster brick to be bound to (overriden by deploy.cfg)
BRICKPATH = "/gluster/brick1"
VGNAME = "gluster"
LVNAME = "gluster"
# define the vars used to describe the raidgroup
STRIPEUNIT = ''
STRIPEWIDTH = ''
# Create a cluster object to act as the top level object
CLUSTER = Cluster()
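
# --- Illustrative usage (an addition, not part of the original module) ---
# Any caller is expected to run init() once before touching the globals;
# the import path below assumes this file lives in the functions package,
# as its own imports suggest:
#
#   import functions.config as config
#   config.init()
#   config.LOGGER.info("gluster-deploy web UI on port %d", config.HTTPPORT)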
| UTF-8 | Python | false | false | 2,545 | py | 43 | config.py | 23 | 0.72888 | 0.715521 | 0 | 97 | 25.226804 | 78 |
YY87927/2021_IoTProject | 7,009,386,666,413 | 3dcc6bdd9a2eeea1ced801524f18ccfba8688c1a | 4ffb4ebaaa484ccc27572434c0d673e35254f630 | /final/first_rpi/cloud_autopilot_rpi.py | da6a99a23b5fc6dd474adfa3c8ab08552d8f3921 | []
| no_license | https://github.com/YY87927/2021_IoTProject | e0c577f28a1708d727694777d0ff3bc1a24b23ab | c172f2c27dae030f8e3c50a249feae51bdb479d2 | refs/heads/master | 2023-02-15T23:20:05.130251 | 2021-01-11T01:17:18 | 2021-01-11T01:17:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import http.client, urllib
import json
import sys
import RPi.GPIO as GPIO
from directions import *
from mcs_functions import *
import threading
v=343
TRIGGER_PINL = 31
ECHO_PINL = 29
TRIGGER_PINR = 11
ECHO_PINR = 13
LIGHT_SENSE = 36
LED_PIN = 40
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(LED_PIN, GPIO.OUT)
auto_speed = 25
auto_turn_speed = 40
manual_speed = 17
manual_turn_speed = 20
def measure_distance(trigger, echo):
GPIO.output(trigger, GPIO.HIGH)
time.sleep(0.00001)
GPIO.output(trigger, GPIO.LOW)
pulse_start = time.time()
while GPIO.input(echo) == GPIO.LOW:
pulse_start = time.time()
while GPIO.input(echo) == GPIO.HIGH:
pulse_end = time.time()
t = pulse_end - pulse_start
d = t * v
d = d/2
return d
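# Worked example (comment only): with v = 343 m/s, an echo pulse lasting
# t = 2 ms gives d = (0.002 * 343) / 2 ~= 0.34, i.e. an obstacle about
# 34 cm from the sensor. Distances returned here are therefore in metres.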
def parse_response(response, channel):
response = response[response.find(channel):]
response = response[response.rfind("value"):]
# response = response[response.find(":")+2:response.find("\"")+1]
response = response[response.find(":")+2:]
response = response[:response.find("\"")]
return response
def readLDR():
reading = 0
GPIO.setup(LIGHT_SENSE, GPIO.OUT)
GPIO.output(LIGHT_SENSE, False)
time.sleep(.1)
GPIO.setup(LIGHT_SENSE, GPIO.IN)
while(GPIO.input(LIGHT_SENSE)==False):
reading = reading+1
return reading
def control_LED():
while True:
if readLDR()>30000:
GPIO.output(LED_PIN, True)
else:
GPIO.output(LED_PIN, False)
def main():
mode = 1
old_distanceL = 100
old_distanceR = 100
turn = False
t = threading.Thread(target=control_LED)
t.start()
while True:
distanceL = measure_distance(TRIGGER_PINL, ECHO_PINL)
distanceR = measure_distance(TRIGGER_PINR, ECHO_PINR)
payload = {"datapoints":[{"dataChnId":"auto","values":{"value":str(distanceL) + "," + str(distanceR)}}]}
post_to_mcs(payload)
if mode==1:
response = parse_response(str(get_from_mcs("direction_m")), "direction_m")
if response=='2':
mode = 2
continue
manual_control(response)
if mode==2:
response = parse_response(str(get_from_mcs("direction")), "direction")
response2 = parse_response(str(get_from_mcs("direction_m")), "direction_m")
if response2=='1':
mode = 1
continue
auto_pilot(response, distanceR, distanceL, old_distanceR, old_distanceL, turn)
stop()
time.sleep(.2)
old_distanceR = distanceR
old_distanceL = distanceL
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 2,643 | py | 11 | cloud_autopilot_rpi.py | 10 | 0.620885 | 0.599319 | 0 | 101 | 25.178218 | 112 |
hwiyoung/OpenCV_with_Python | 4,028,679,344,373 | 43442c0043f671bbfcfdee6d929391d086075fea | f8a3bf08fe0400bac87bb9c6e34b80e3f63fb58d | /Chapter02/blurring.py | 4ab81eebf734a4373104c024149342518d5880c7 | []
| no_license | https://github.com/hwiyoung/OpenCV_with_Python | f0bf706317fe93e1dea10a65ab09f2cc48297fe0 | 4564cf723e79546bf9a3a7de32c2866e6c53937e | refs/heads/master | 2021-09-19T17:09:29.454548 | 2018-07-30T04:38:45 | 2018-07-30T04:38:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
img = cv2.imread('./images/IMG.jpg')
rows, cols = img.shape[:2] # row, column, channel
kernel_identity = np.array([ [0, 0, 0], [0, 1, 0], [0, 0, 0] ])
kernel_3x3 = np.ones((3, 3), np.float32) / 9.0 # normalization
kernel_5x5 = np.ones((5, 5), np.float32) / 25.0 # normalization
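# Why the divisions by 9.0 and 25.0 above: a low-pass kernel should sum to 1
# so the filtered image keeps its overall brightness. A quick check of that
# normalization (an illustrative addition, not part of the original demo):
print(kernel_3x3.sum(), kernel_5x5.sum())   # both ~1.0, up to float rounding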
cv2.imshow('Original', img)
output = cv2.filter2D(img, -1, kernel_identity)
cv2.imshow('Identity filter', output)
output = cv2.filter2D(img, -1, kernel_3x3)
#output = cv2.blur(img, (3, 3))
cv2.imshow('3x3 filter', output)
output = cv2.filter2D(img, -1, kernel_5x5)
#output = cv2.blur(img, (5, 5))
cv2.imshow('5x5 filter', output)
cv2.waitKey()
| UTF-8 | Python | false | false | 659 | py | 24 | blurring.py | 24 | 0.655539 | 0.569044 | 0 | 24 | 26.458333 | 63 |
M9Bnhf57/recognition | 14,388,140,482,316 | fa9adde3d1d75d726134ec6a84660c467cf1ee33 | 46461628949c24e48cbe117d91d4e9be6faeeb77 | /Untitled-1.py | dd402f6e4a50dc225e87024e09e7eba8802fd88f | []
| no_license | https://github.com/M9Bnhf57/recognition | 4f00b97d72aa4e996b54bfb7c14ec0834babcfb5 | cf33682557ac4cb6b9e9aae84d986f6488d8f31d | refs/heads/master | 2020-09-13T03:42:14.980740 | 2019-11-19T08:35:15 | 2019-11-19T08:35:15 | 222,646,695 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy
import matplotlib.pyplot as plt
import skimage.data
from skimage import io
k=1
while k < 76:
path = "C:/dataset_segmentation/"+ str(k) +".jpg" #str(k)
img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
imgarray = numpy.array(img)
    # binarize: dark pixels (<200) become foreground 1.0; the untouched
    # bright pixels (still >=56 afterwards) become background 0.0
    imgarray[imgarray<200] = 1.0
    imgarray[imgarray>=56] = 0.0
    i=0
    while i < 3:
        # at i == 0 the kernels below are empty, so OpenCV falls back to its
        # default 3x3 structuring element; at i = 1, 2 they are 21*i and 63*i squares
        array = numpy.array([[1]*21*i]*21*i)
        imgarray = cv2.morphologyEx(imgarray, cv2.MORPH_OPEN, array)
        array = numpy.array([[1]*63*i]*63*i)
        imgarray = cv2.morphologyEx(imgarray, cv2.MORPH_CLOSE, array)
        i = i + 1
plt.imshow(imgarray, cmap='Greys') #Needs to be in row,col order
    plt.savefig(str(k) + ".jpg")   # the colormap is already applied by imshow; savefig takes no cmap
k += 1 | UTF-8 | Python | false | false | 730 | py | 5 | Untitled-1.py | 5 | 0.620548 | 0.575342 | 0 | 29 | 24.206897 | 69 |
samyakbvs/Internship_backend | 19,095,424,634,679 | e29f134dc39002b3d9a811d34455bad5d128919a | 7817e3acb344d760873b26d0d64f9c6d81e04b42 | /lms/migrations/0001_initial.py | b8a26ad4c322e20c1113b73fa0ce974ca46c400f | []
| no_license | https://github.com/samyakbvs/Internship_backend | 3d07aeb1d64e5be0e1b62b5df026639e71bd23f6 | 718864f41b269be2a217fb6cc7959bdf8127db63 | refs/heads/master | 2021-01-04T13:18:47.016927 | 2020-04-30T11:12:01 | 2020-04-30T11:12:01 | 240,568,333 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.1 on 2020-01-26 16:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(max_length=264)),
('Init_time', models.DateTimeField()),
('Description', models.TextField()),
('Author', models.CharField(max_length=264)),
('Image', models.ImageField(upload_to='')),
],
),
]
| UTF-8 | Python | false | false | 712 | py | 12 | 0001_initial.py | 12 | 0.543539 | 0.515449 | 0 | 25 | 27.48 | 114 |
kongyanye/cgnattest | 18,468,359,385,102 | 0667d1887f86876a7ba8604072504c17a4e29f5e | e86a26c6cf60699200c955b6ec12ea4269b95db3 | /clearsession.py | c00412456ed7a010aa4640e840d011770ff7b59e | []
| no_license | https://github.com/kongyanye/cgnattest | d69438e0344423236050bd3801c542f2652eb179 | 2139b5f48f269f65df63a28d99c59c6fe515d22b | refs/heads/master | 2023-01-12T00:41:16.485607 | 2020-11-22T14:45:23 | 2020-11-22T14:45:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scapy.all import *
from paramiko import SSHClient, AutoAddPolicy
import pdb
import random
import string
import ipaddress
import time
def ssh_fgt(ipadd,user,pwd,command):
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
client.connect(ipadd,username=user,password=pwd)
stdin, stdout, stderr = client.exec_command(command)
print(stdout.read())
client.close()
fgtip="10.10.1.1"
fgtuser="admin"
fgtpass="fortinet"
ssh_fgt(fgtip, fgtuser, fgtpass, "diag sys session clear")
| UTF-8 | Python | false | false | 529 | py | 14 | clearsession.py | 14 | 0.73913 | 0.727788 | 0 | 25 | 20.16 | 58 |
theredferny/CodeCademy_Learn_Python_3 | 14,937,896,293,334 | a63cec6dc72ce3fb63c17512b583ce2daf48399a | 0ad2a8e86c69d6d95d042ddfe6ee6449b25d84c9 | /Unit_09-Files/09_01_11-Reading_a_JSON_File.py | d7792f4497d39cd67cecaf94676de6432874a660 | []
| no_license | https://github.com/theredferny/CodeCademy_Learn_Python_3 | 31747cc8a640039012af34fc78e9113932ce6909 | d0f34dedf041d1c70f4fb5df1b2baf681c97739e | refs/heads/master | 2023-03-24T10:00:48.755045 | 2021-03-16T15:18:19 | 2021-03-16T15:18:19 | 342,673,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
with open('message.json') as message_json:
message = json.load(message_json)
print(message['text'])
"""
message.json
{
"text": "Now that's JSON!",
"secret text": "Now that's some _serious_ JSON!"
}
"""
"""
CSV isn’t the only file format that Python has a built-in library for. We can also use Python’s file tools to read and write JSON. JSON, an abbreviation of JavaScript Object Notation, is a file format inspired by the programming language JavaScript. The name, like CSV is a bit of a misnomer — some JSON is not valid JavaScript (and plenty of JavaScript is not valid JSON).
JSON’s format is endearingly similar to Python dictionary syntax, and so JSON files might be easy to read from a Python developer standpoint. Nonetheless, Python comes with a json package that will help us parse JSON files into actual Python dictionaries. Suppose we have a JSON file like the following:
purchase_14781239.json
{
  "user": "ellen_greg",
  "action": "purchase",
  "item_id": "14781239"
}
We would be able to read that in as a Python dictionary with the following code:
json_reader.py
import json
with open('purchase_14781239.json') as purchase_json:
purchase_data = json.load(purchase_json)
print(purchase_data['user'])
# Prints 'ellen_greg'
First we import the json package. We open the file using our trusty open() command. Since we're opening it in
read-mode we just need to pass the file name. We save the file object in the temporary variable purchase_json.
We continue by parsing purchase_json using json.load(),
creating a Python dictionary out of the file.
Saving the results into purchase_data means we can interact with it.
We print out one of the values of the JSON file by keying into the
purchase_data object.""" | UTF-8 | Python | false | false | 1,774 | py | 113 | 09_01_11-Reading_a_JSON_File.py | 111 | 0.747732 | 0.734127 | 0 | 48 | 35.770833 | 373 |
kevin-bigler/pytris | 17,128,329,607,748 | 632296f308036f19d8d108c0c69160e75bbeb3d7 | be2955418744c1d530e229dd316e40987c4e9428 | /grid.py | 95b9ceedd3ac3ab3fca94440ca3f20cea682b830 | []
| no_license | https://github.com/kevin-bigler/pytris | 113f33ed9cdc8b479fff2a6d97204f5a94dd242a | 0fa65422127e61dc2567551bdd15b36aa21de2c3 | refs/heads/master | 2023-04-05T19:13:23.874664 | 2021-05-08T17:59:19 | 2021-05-08T17:59:19 | 338,723,791 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Grid data structure, for convenience"""
class Grid:
def __init__(self, width, height, initFn=lambda x, y: {}):
"""
Args:
width
height
initFn - invoked to initialize each grid item's value, ie initFn(x, y). default is empty map
"""
self.width = width
self.height = height
def get_row(y):
return [initFn(x, y) for x in range(width)]
self.arr = [get_row(y) for y in range(height)]
def __call__(self, x, y):
return self.arr[y][x]
def debug_print(self):
print('grid width x height =', len(self.arr[0]), 'x', len(self.arr))
def __iter__(self):
"""Each element is returned as a tuple of (val, pos), as in (val, (x, y))"""
for y in range(self.height):
for x in range(self.width):
yield (self.arr[y][x], (x, y))
def vals(self):
"""Iterator for grid values"""
for y in range(self.height):
for x in range(self.width):
yield self.arr[y][x]
@property
def size(self):
return (self.width, self.height)
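
# --- Illustrative usage (an addition, not part of the original module) ---
# A tiny demo of the class above: the init function encodes each cell's
# position in its value so the accessors are easy to verify.
if __name__ == "__main__":
    g = Grid(3, 2, initFn=lambda x, y: x + 10 * y)
    print(g(2, 1))        # __call__ indexes by (x, y) -> 12
    print(g.size)         # (3, 2)
    for val, (x, y) in g: # iteration yields (value, position) tuples
        print((x, y), val)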
| UTF-8 | Python | false | false | 1,141 | py | 16 | grid.py | 15 | 0.521472 | 0.520596 | 0 | 38 | 29.026316 | 104 |
oliviamikola/choosemybook | 4,148,938,454,912 | acbd3668dd2140acbf9249660451cc0cc82cc518 | a2231de311cfe71884940243e411f6e6e1e11254 | /receiver.py | 48b503fc5976b8934ceed03a343fd1e84daed858 | []
| no_license | https://github.com/oliviamikola/choosemybook | 2f81c7ed8fcd2ef591a820cbcdbb54107db2b726 | 3e3969921228238775c7eb88e0854a527a2fd426 | refs/heads/master | 2021-05-18T07:39:07.006190 | 2020-07-07T03:41:07 | 2020-07-07T03:41:07 | 251,183,262 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import time
import xmltodict
from typing import Dict
from Objects.book import Book
from Objects.user import User
from Exceptions.apiConnectionError import ApiConnectionError
from Exceptions.authorizationError import AuthorizationError
class Receiver:
def __init__(self, user: User):
"""
Initializes important user data to collect book data
:param user: The user whose data is being collected
"""
self._user = user
self._last_checked_time = time.time()
def collect_books(self, shelf: str = "to-read") -> Dict[str, Book]:
"""
Collects the books from the given shelf
:param shelf: shelf to get books from
:return: dictionary containing books from shelf
"""
url = "https://www.goodreads.com/review/list"
params = {**{"v": 2, "shelf": shelf, "page": 1, "per_page": 200}, **(self._user.set_user_params())}
self.__update_time()
# Will contain all books on the shelf and their data
books_on_shelf = {}
while True:
goodreads_data = self.__retry(url, params)
# Getting the number of books on the shelf and the last one received
book_stats = goodreads_data.split("\n")[10].split('"')
total_books = book_stats[5]
last_book_in_list = book_stats[3]
self.__translate_data(goodreads_data, books_on_shelf)
# Check if all the books are accounted for
if total_books == last_book_in_list:
break
params["page"] += 1
self.__keep_time()
return books_on_shelf
def __keep_time(self) -> None:
"""
Ensures goodreads API usage rules are followed
:return: None
"""
while time.time() - self._last_checked_time < 1.0:
pass
self.__update_time()
def __update_time(self) -> None:
"""
Updates the last checked time
:return: None
"""
self._last_checked_time = time.time()
def __retry(self, url: str, params: Dict[str, object]) -> str:
"""
Attempts data collection from goodreads
Raises an error if unable to connect
:param url: request url
:param params: params required to get goodreads data
:return: received data if successful, otherwise raises error
"""
for _ in range(5):
self.__update_time()
request = requests.get(url, params=params)
if request.status_code == 200:
return request.text
if request.status_code == 401:
raise AuthorizationError("Not authorized to access goodreads data")
self.__keep_time()
else:
raise ApiConnectionError("Unable to connect to goodreads API")
def __translate_data(self, goodreads_data: str, books_on_shelf: Dict[str, Book]) -> None:
"""
Translates goodreads data from received XML to a dictionary of Book objects
:param goodreads_data: data received from goodreads API call
:param books_on_shelf: dictionary containing Book objects
:return: None
"""
dict_data = xmltodict.parse(goodreads_data)["GoodreadsResponse"]["reviews"]["review"]
# Get essential data about the book and add to dictionary
for book_data in dict_data:
book_data = book_data["book"]
book = Book(book_data)
books_on_shelf[book.get_id()] = book
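
# --- Illustrative usage (an addition; kept as comments because the User
# constructor from Objects.user is not shown here, so its arguments are
# assumptions) ---
#
#   user = User(...)   # hypothetical constructor arguments / credentials
#   shelf = Receiver(user).collect_books(shelf="to-read")
#   for book_id, book in shelf.items():
#       print(book_id, book)
#
# Note that collect_books() paces itself to at most one request per second,
# as required by the goodreads API terms.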
| UTF-8 | Python | false | false | 3,534 | py | 8 | receiver.py | 6 | 0.591398 | 0.586022 | 0 | 103 | 33.31068 | 107 |
jayasindhura/OMK | 10,307,921,521,417 | 7fbfb33ed2e709532fe1849a5308abee27a50a26 | 09082a4e2ee10340d8a1581d377d9dd064cf8169 | /sma/admin.py | 5a83bbae3406904620f834efc40b0f711aa9493b | []
| no_license | https://github.com/jayasindhura/OMK | 819d42d57ec330e7b1036eb1707d12ff82c1a67c | 90cc6e20f94af18561bbe29cb42c2d2c61893e16 | refs/heads/master | 2021-01-09T17:42:07.964008 | 2020-02-22T18:46:13 | 2020-02-22T18:46:13 | 242,393,531 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import School,Grade,Student,Mentor,Student_Group_Mentor_Assignment,Session_Schedule
# Register your models here.
class SchoolList(admin.ModelAdmin):
list_display = ('school_name', 'school_email', 'school_phone')
list_filter = ('school_name', 'school_email')
search_fields = ('school_name',)
ordering = ['school_name']
class GradeList(admin.ModelAdmin):
list_display = ('school','grade_num')
list_filter = ('school','grade_num')
search_fields = ('school','grade_num')
ordering = ['school']
class StudentList(admin.ModelAdmin):
list_display = ('student_first_name','student_middle_name', 'student_last_name','school_id','grade_id')
    list_filter = ('student_first_name', 'student_last_name', 'school_id', 'grade_id')
    search_fields = ('student_first_name', 'student_last_name', 'school_id', 'grade_id')
ordering = ['student_first_name']
class MentorList(admin.ModelAdmin):
list_display = ('mentor_first_name','mentor_middle_name', 'mentor_last_name','mentor_email','mentor_phone')
list_filter = ('mentor_first_name','mentor_middle_name', 'mentor_last_name','mentor_email','mentor_phone')
search_fields = ('mentor_first_name','mentor_middle_name', 'mentor_last_name','mentor_email','mentor_phone')
ordering = ['mentor_first_name']
class GroupMentorAssignmentList(admin.ModelAdmin):
list_display = ('group_name','school','grade','mentor')
list_filter = ('group_name','school','grade','mentor')
search_fields = ('group_name','school','grade','mentor')
ordering = ['group_name']
class SessionScheduleList(admin.ModelAdmin):
    list_display = ('session_location', 'mentor', 'group', 'session_start_date')
    list_filter = ('session_location', 'mentor', 'group', 'session_start_date')
    search_fields = ('session_location', 'mentor', 'group', 'session_start_date')
ordering = ['session_location']
admin.site.register(School,SchoolList)
admin.site.register(Grade,GradeList)
admin.site.register(Student,StudentList)
admin.site.register(Mentor,MentorList)
admin.site.register(Student_Group_Mentor_Assignment,GroupMentorAssignmentList)
admin.site.register(Session_Schedule,SessionScheduleList)
| UTF-8 | Python | false | false | 2,275 | py | 17 | admin.py | 17 | 0.712967 | 0.712967 | 0 | 46 | 48.391304 | 112 |
tikiet/sbuild | 1,228,360,671,471 | ef9ef4bb8b9f941d1325f4caab5a040d1c8ba577 | 25d845f4cc6eaed16cee8cf34942f13fddeefbd3 | /tools/get_keys.py | 94e14cc6a187ec42758eda0545fd59ce62ad33ee | []
| no_license | https://github.com/tikiet/sbuild | 8b50fcd3e4151c191de276a38f96095696175916 | 3f9e88af33073f8f3b47697dd930820538dde322 | refs/heads/master | 2021-01-18T05:29:05.060028 | 2015-03-26T14:07:56 | 2015-03-26T14:09:55 | 32,930,394 | 0 | 0 | null | true | 2015-03-26T13:56:30 | 2015-03-26T13:56:30 | 2015-03-26T13:55:07 | 2015-03-26T13:46:02 | 165 | 0 | 0 | 0 | null | null | null | import os, shutil
from sys import argv
import xml.etree.ElementTree as ET
import itertools
string_names = open('string_names.txt', 'w')
def parse(_file):
l = []
tree = ET.parse(_file)
root = tree.getroot()
for string in root:
l.append(string.get('name'))
return l
def get_keys(path_to_res):
big_l = []
for root, dirs, files in os.walk(path_to_res):
for f in files:
little_l = parse(os.path.join(root, f))
big_l.append(little_l)
return big_l
def main(path_to_res):
big_l = get_keys(path_to_res)
combined = list(itertools.chain.from_iterable(big_l))
for name in combined:
name = str(name)
string_names.write(name +'\n')
string_names.close()
if __name__ == "__main__":
main(argv[1])
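
# For reference, each file under path_to_res is expected to look like an
# Android-style strings resource (an illustrative sample, not from this repo):
#
#   <resources>
#       <string name="app_name">Example</string>
#       <string name="greeting">Hello</string>
#   </resources>
#
# parse() collects the name attributes ("app_name", "greeting"), and main()
# flattens the per-file lists into string_names.txt, one name per line.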
| UTF-8 | Python | false | false | 797 | py | 5 | get_keys.py | 3 | 0.593476 | 0.592221 | 0 | 33 | 23.151515 | 57 |
clhiker/WPython | 18,554,258,734,869 | b23089d09cd4d6ca097a4e97967aa172f58dc5db | c1edf63a93d0a6d914256e848904c374db050ae0 | /Python/Python基础知识/面对对象编程/test.py | 2f0cde920f3a2e41e1d0579c490590551fbc8a3d | []
| no_license | https://github.com/clhiker/WPython | 97b53dff7e5a2b480e1bf98d1b2bf2a1742cb1cd | b21cbfe9aa4356d0fe70d5a56c8b91d41f5588a1 | refs/heads/master | 2020-03-30T03:41:50.459769 | 2018-09-28T07:36:21 | 2018-09-28T07:36:21 | 150,703,520 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def main():
test = "p|q&f|~d"
pos = test.find("m")
print(pos)
pro_var = {""}
standard_oper = "|&~"
for i in range(0, len(test)):
if (standard_oper.find(test[i]) == -1):
pro_var.add(test[i])
l = list(pro_var)
for i in range(len(l)):
print(l[i])
pattern = test.split("|")
for i in range(len(pattern)):
print(pattern[i],end=" ")
def test():
test = "p|q&f|~d"
for i in range(len(test)):
if(test[i] != '~'):
print("fdsa")
test() | UTF-8 | Python | false | false | 529 | py | 171 | test.py | 165 | 0.470699 | 0.466919 | 0 | 23 | 22.043478 | 47 |
LiveTowardTheSea/bert_model | 17,549,236,384,239 | fc2483cb39999adda038e59e93f8b5d98285b6bf | 80b6c944728ca61ddc477fa2bb9fa312e1c4c81d | /model/bert_model.py | 69d77c305e6447c6c44e1ff54c58175468f93f2e | []
| no_license | https://github.com/LiveTowardTheSea/bert_model | c2f480cf4bdbec10ad7830584ca2d84f90345082 | 8ce207a8319bf53e064dacb7d574ff8f912e863a | refs/heads/master | 2023-04-28T06:15:13.337520 | 2021-05-20T13:44:26 | 2021-05-20T13:44:26 | 369,202,848 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from model.Linear_Decoder import Softmax_decoder
import torch.nn as nn
from Encoder import *
from CRF_Decoder import *
from Linear_Decoder import *
class bert_model(nn.Module):
def __init__(self,config,tag_num):
super(bert_model, self).__init__()
self.config = config
self.encoder = Encoder()
if self.config.decoder == 'crf':
self.decoder = CRF_decoder(config.d_model, tag_num)
elif self.config.decoder == 'softmax':
self.decoder = Softmax_decoder(config.d_model,tag_num)
def forward(self, src, trg, src_mask, trg_mask,use_gpu):
# src: (batch_size,seq_len)
# trg: (batch_size,seq_len)
# src_mask (batch_size,seq_len)
        # if return_atten is True, the attention weights of every layer are returned; otherwise an empty list
encoder_output = self.encoder(src, src_mask, use_gpu)
encoder_output = encoder_output[:,1:-1,:]
loss,path_ = self.decoder.loss(encoder_output, trg, trg_mask, use_gpu)
return loss,path_ | UTF-8 | Python | false | false | 1,033 | py | 5 | bert_model.py | 4 | 0.632694 | 0.630676 | 0 | 24 | 40.333333 | 78 |
2Ler/python | 7,765,300,884,076 | 97728613c8cf5ee0a0731260609304e53c373851 | ba59ad9dfd9d63892d1ed4c02cdc154a9af884a5 | /reverse_string.py | d41d3a6a9014d4d81433ed07ed2186bdbf4b5a4e | []
| no_license | https://github.com/2Ler/python | e1fb21785f14d73c12a6a4b3e19f0b58fdabe906 | 9aaf4d5a23d004618a71bc4adc738138e5abf9a9 | refs/heads/master | 2020-03-19T05:07:29.922983 | 2018-06-04T10:01:14 | 2018-06-04T10:01:14 | 135,902,764 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | s=raw_input()
reverse=""
for i in xrange(len(s)-1,-1,-1):
reverse+=s[i]
print reverse
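# Equivalent one-liner for comparison (slicing with a negative step walks the
# string backwards in one go):
#   print s[::-1]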
| UTF-8 | Python | false | false | 88 | py | 24 | reverse_string.py | 23 | 0.636364 | 0.602273 | 0 | 5 | 16.6 | 32 |
KevSed/ML_Seminar | 17,987,323,044,174 | 764621fd285a1972c6a4fac5407831179222086b | c99a1f59c7219ea34a931f03a4764519ce8b58be | /Alternative/runAnalysis.py | c9ad9bb344d537ade283604758cdb11bf683897a | [
"MIT"
]
| permissive | https://github.com/KevSed/ML_Seminar | 8fc5ae601745e1c0c71017fc857518db43fb2d1a | 3360d56e5808e041627f82d8309d70aa86563058 | refs/heads/master | 2020-03-20T06:00:51.634676 | 2018-07-31T16:56:12 | 2018-07-31T16:56:12 | 137,234,962 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from dataprep import *
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.models import model_from_json
import h5py
from keras import optimizers
from sklearn.metrics import confusion_matrix,classification_report
import itertools
from performance import *
from grid import *
"""
These are important things to read!!!
This is the file where you should put everything what you want to run.
* dataprep.py: Contains methods for data preparation:
- get_data(Folder, File) reads all images from Folder and saves it to File.
This File should be a hdf5 file, which contains the pixel arrays of each
image in File['X_Train'] and labels in File['Y_Train']. By default images
are resized to (50,100) shape.
- prep_eval(file, outfile) reads in file obtained by get_data(Folder,File)
and performs the image scan. The image scan calculates the means of all
pixels within a defined window. By default this window has the dimension (2,4).
Furthermore, it performs train_test and train_val splits and saves all
datasets to outfile. To take the different class sizes into account, weights
are calculated and saved in outfile as well.
* grid.py: Contains the class grid. A class member has to be initialized with a
dataset obtained from prep_eval containing training and validation datasets and
their corresponding weights. By default it contains a string in Path_base containing the
path to the folder where model structures to test are saved and a string in Out_file
containing the path to the folder where fitted model structures, histories and
trained weights are saved and have to be set correctly by the user before calling a method
of this class.
- make_model(self, dense_layer, activation, dropouts, out_activation, number)
receives a network structure to be tested and saves the structure in a json
file. The user has to assign a number to this model which is saved in the
suffix of this file
- fit_model(self, number, outnumber, outfile): Fits model corresponding to number and
saves model and history of the training in Out_file as .json file and trained weights as .hdf5 file.
The user has to assign an outnumber to this run. Loss and accuracy histories are saved in outfile.
* performance.py: Contains methods to evaluate the performance of models and
to select models from grid search.
- model_evaluator(mod, infiles, outfiles, lab, unblind=False): receives the number corresponding to
a fitted model and, in infiles, the folder containing the corresponding json file for the model
structure and the hdf5 files for the trained weights.
Confusion matrix is plotted, classification_report is printed and the
NN output for a given label corresponding to an analyzed class.
This label is defined by lab. Outputs are saved to outfiles.
If unblind is set to True the performance will be evaluated on the test dataset,
otherwise on the validation dataset.
- model_selector(infiles, batch_size, tested_models, acc_thr, loss_thr, outfile)
selects models tested in grid search passing the two step selection.
A model has to pass an accuracy threshold, defined by the gap train accuracy - validation accuracy staying below acc_thr,
and a loss threshold defined by the loss function after all epochs being smaller than loss_thr*loss function
after 1 epoch. It returns an array with all numbers corresponding to models
passing the selection. tested_models correspond to the tested number of
layer structures (This is very hard coded, sorry for that)
- model_plotter(infiles,outfiles, batch_size, tested_models) plots for all
tested layer structures the validation accuracy and loss after all epochs
as function of the tested batch size given in batch_size and tested
acitvation functions for the hidden layers and output layer.
Outputs are saved to out files.
* A complete example of the analysis is given below. Note that it is necessary to download the Dataset first:
https://www.kaggle.com/paultimothymooney/kermany2018. Example_plots shows images after resizing and input distributions for the DNN
The hdf5 file will be 3.3GB large, if you use the train dataset in the OCT2017 folder
"""
Path = '/home/bjoern/Studium/ML'
example_plots()
get_data(Path + '/OCT2017/test/', 'test.hdf5')
prep_eval('test.hdf5', 'evaluate_test.hdf5')
dataset = h5py.File('evaluate_test.hdf5', mode='r')
X = grid(dataset)
X.Path_Base = Path + '/ML_Seminar/Alternative/TestOutput/ModelBase/'
X.Out_file = Path + '/ML_Seminar/Alternative/TestOutput/Files/'
# Tested models
dense_layer =[[1024,512,128,64,32], [1024, 512,256,128,64,32,16], [512,256,128,64,32,16], [1024, 256, 64, 16], [512, 128, 32]]
activation = ['relu', 'elu']
out_activation = ['softmax', 'sigmoid']
dropouts = [[0.5, 0.4,0.4, 0.3, 0.2], [0.5, 0.4, 0.4, 0.4, 0.2, 0.2, 0.1], [0.4, 0.4, 0.3, 0.3, 0.2, 0.1], [0.6, 0.4, 0.2, 0.1], [0.5, 0.3, 0.1]]
tested_models = []
number = 0
for i in range(len(dense_layer)):
for acti in activation:
for out_acti in out_activation:
X.make_model(dense_layer[i], acti, dropouts[i], out_acti, number)
tested_models.append(number)
number += 1
batch_size = [50, 64,100, 128, 256, 512]
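# Fit every saved structure with every batch size; the run id i*len(batch_size)+b
# keeps each (model, batch size) pair unique.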
for i in tested_models:
for b in range(len(batch_size)):
X.batch_size = batch_size[b]
X.fit_model(i, i*len(batch_size)+b, Path + '/ML_Seminar/Alternative/TestOutput/Output/')
models = model_selector(Path + '/ML_Seminar/Alternative/TestOutput/Files/', [50,64,100,128,256,512], 5, 0.20, 1, Path + '/ML_Seminar/Alternative/TestOutput/AfterSel/')
model_plotter(Path + '/ML_Seminar/Alternative/TestOutput/Files/', Path + '/ML_Seminar/Alternative/TestOutput/ModelEval/',[50,64,100,128,256,512],5)
print(models)
accuracy = []
for i in models:
accuracy.append(model_evaluator(i, 'evaluate_test.hdf5',Path + '/ML_Seminar/Alternative/TestOutput/Files/',Path + '/ML_Seminar/Alternative/TestOutput/Performance/',0, unblind=False ))
# Spoiler ALERT: This is the model which wins the test !!!
model_evaluator(6,'evaluate_test.hdf5', Path + '/ML_Seminar/Alternative/TestOutput/Files/',Path + '/ML_Seminar/Alternative/TestOutput/PerformanceTest/',0, unblind=True )
print(accuracy)
# Select the model with the highest accuracy
| UTF-8 | Python | false | false | 6,495 | py | 35 | runAnalysis.py | 16 | 0.730408 | 0.698075 | 0 | 132 | 48.204545 | 187 |
jonatasoli/djangorestframework_example | 10,849,087,394,013 | c5b4eca79bc54b8df26f543ba310a534f07e001d | 01e997012763f746242ed79096cafb50e1d5022a | /todo/tests/test_todo_api.py | 0202beee6c5f14d61c1a4d048139397916b62cc0 | []
| no_license | https://github.com/jonatasoli/djangorestframework_example | 824210b5cd891beeb2562bb722b991113f814069 | 7d8e2d26164eb1453df4e39021c1c1d502ce12bd | refs/heads/master | 2020-04-29T20:34:57.984079 | 2019-03-25T22:17:08 | 2019-03-25T22:17:08 | 176,387,834 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Todo
from todo.serializers import TodoSerializer
TODO_URL = reverse('todo:todo-list')
class PublicTodoApiTests(TestCase):
"""Test the publically available todos API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required to access this endpoint"""
res = self.client.get(TODO_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTodoAPITests(TestCase):
"""Test todos can be retrieved by authorized user"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@jonatasoliveira.me',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_todo_list(self):
"""Test retrieving a list of todos"""
Todo.objects.create(user=self.user,
task='first',
description='Todo 1')
Todo.objects.create(user=self.user,
task='second',
description='Todo 2')
res = self.client.get(TODO_URL)
todos = Todo.objects.all().order_by('id')
serializer = TodoSerializer(todos, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_todos_limited_to_user(self):
"""Test that only todos for authenticated user are returned"""
user2 = get_user_model().objects.create_user(
'other@jonatasoliveira.me',
'testpass'
)
Todo.objects.create(user=user2,
task='New',
description='New todo')
todo = Todo.objects.create(user=self.user,
task='User Task',
description='User Task Description')
res = self.client.get(TODO_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['task'], todo.task)
def test_create_todo_successful(self):
"""Test creating a new Todos"""
payload = {'task': 'Todo Successful', 'description': 'Description Successful'}
self.client.post(TODO_URL, payload)
exists = Todo.objects.filter(
user=self.user,
task=payload['task']
).exists()
self.assertTrue(exists)
def test_create_todo_invalid(self):
"""Test creating invalid ingredient fails"""
        payload = {'task': '', 'description': ''}
res = self.client.post(TODO_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| UTF-8 | Python | false | false | 3,000 | py | 8 | test_todo_api.py | 4 | 0.597333 | 0.591333 | 0 | 92 | 31.608696 | 86 |
netiapaul/python-prac | 8,074,538,542,699 | 0a69c51b65c7cf9e37e13c451cde5de9a0e115a5 | 68f1a9282ae9e6b6be3d9709b70bc6b534e24235 | /prac.py | 38d7bd11075c2570740f618e77d27b5a7f20967a | []
| no_license | https://github.com/netiapaul/python-prac | c24e74db78d22f58a9ac05d4587fd3a828dccf45 | 148a3f3d786bcbf26da2ad37d160395e079ec4f2 | refs/heads/master | 2020-05-17T14:19:11.363943 | 2019-04-27T11:06:10 | 2019-04-27T11:06:10 | 183,761,868 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Question 1: Accept two int values from user and return their product. If the product is greater than 1000, then return their sum
a = int(input('Enter a number: '))
b = int(input('Enter a second number: '))
c = a * b
if c > 1000:
    print(a + b)
else:
    print(c)
# Question 2: Given a two list of ints create a third list such that should contain only odd numbers from the first list and even numbers from the second list
def mergeList(listOne, listTwo):
thirdList = []
for num in listOne:
if(num % 2 != 0):
thirdList.append(num)
for num in listTwo:
if(num % 2 == 0):
thirdList.append(num)
return thirdList
print("Merged List is")
listOne = [10, 20, 23, 11, 17]
listTwo = [13, 43, 24, 36, 12]
print(mergeList(listOne, listTwo))
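# Expected output: [23, 11, 17, 24, 36, 12]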
# Question 3: Given a list of numbers, Iterate it and print only those numbers which are divisible of 5
rand=[value for value in range(1,21)]
for num in rand:
if num % 5 == 0:
print(num)
# Question 4: Given a list of ints, return True if first and last number of a list is same
numList = [10, 20, 30, 40, 10]
firstnum=numList[0]
lastnum=numList[-1]
if firstnum == lastnum:
print(True)
else:
print(False)
# Question 5: Given a string and an int n, remove characters from string starting from zero upto n and return a new string
def removeChars(s, n):
    return s[n:]
print("Removing n number of chars")
print(removeChars("programmer", 4))
# Question 6: Given a range of numbers. Iterate from o^th number to the end number and print the sum of the current number and previous number
def sumNum(num):
    previousNum = 0
    for i in range(num):
        print(previousNum + i)
        previousNum = i
print("Printing current and previous number sum in a given range")
sumNum(10) | UTF-8 | Python | false | false | 1,755 | py | 2 | prac.py | 1 | 0.691738 | 0.65755 | 0 | 66 | 25.606061 | 159 |
bomb1e/gotgit | 14,482,629,764,979 | 38dcf2a0f86f865f4186f8a22ed61546cae2f3f0 | 7192da38b6afd3c60f80ccbecb3040cf34369ce3 | /5bf281fb-178c-493c-8c7a-422ac4bf1fe0.py | 17b5e2676c69807688a7079e0ac67acd2c1895ef | []
| no_license | https://github.com/bomb1e/gotgit | 6699fa9e6adb0a79f22441df41a102b09b78c2ce | 3c14cb6d1d0c4cba6a1e24a9899462d2e4bee2ce | refs/heads/master | 2020-06-12T06:00:34.695425 | 2018-08-01T02:00:02 | 2018-08-01T02:00:02 | 75,600,461 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
def func38(arg218, arg219):
var223 = func39(arg218, arg219)
var268 = var226(var223, arg219)
def func47(arg269, arg270):
var271 = 1861037871 | (arg218 + var223) ^ 775
var272 = arg219 ^ arg270
var273 = arg219 | arg270
var274 = arg269 | -460
var275 = arg219 ^ var271
var276 = (var223 & var271) & var273
var277 = (var272 ^ (arg269 + 671)) ^ -499988221
var278 = var268 - (var268 - var268) + var276
var279 = arg219 ^ arg269
var280 = (arg270 - var274) | var276
var281 = (arg218 & var280) & (var278 | var280)
var282 = arg270 | var277 | var268 + 1168894387
result = var281 - (arg218 | var282 + var282 & (var278 - (arg270 | arg270)))
return result
var283 = func47(var223, var268)
var284 = -1900041792 & (var283 & (var223 | 1659806111))
var285 = 746729960 | (var284 + arg218)
var286 = arg219 ^ (arg218 - var268) + arg218
var287 = ((var284 | -466928079) | arg219) - var285
var288 = (var287 & (var283 - arg219)) ^ var223
var289 = (var286 ^ var283 + arg218) - var284
var290 = var284 - var284 ^ (var287 + var289)
var291 = (594 + var223 + arg218) ^ arg219
if arg218 < var284:
var292 = arg218 | var290 & arg218 & var288
else:
var292 = var290 | ((var286 - var288) + var289)
var293 = 1769219912 + var223
var294 = (var283 & var290 & -876529538) ^ var285
if var293 < arg218:
var295 = (var268 & arg219 ^ var290) ^ var291
else:
var295 = var288 | var291
if var285 < var284:
var296 = (var223 - arg218) | var289
else:
var296 = arg218 ^ var294 + var223 & var293
var297 = var223 & var291
var298 = (var297 - var283 ^ var223) + var286
var299 = 1236150398 + var284
var300 = (var283 + var290) & arg218 + var286
var301 = (var287 ^ var287) - var297 ^ var298
var302 = var223 & (var284 + arg218) & var287
if var302 < var293:
var303 = var283 ^ (var302 ^ var223) + var302
else:
var303 = var293 | var293
var304 = var283 ^ (var293 - var223) ^ var268
var305 = (var301 + var285) & var300 - var291
result = var283 ^ ((var302 + (var302 | ((var291 + var285) + var294 ^ var268) - var304 + var287 & var301) | var268) + arg218)
return result
def func43(arg227, arg228):
var245 = func44(arg227, arg228)
var246 = arg228 & -871 - -85142947 - arg227
var247 = var245 ^ (var245 | arg227) ^ var246
var248 = var245 & arg228 - var246 | -1983456900
var249 = var248 + ((arg228 + 661) - arg228)
var250 = var246 | (var247 | var249)
var251 = var247 ^ (var250 - (var250 + var248))
var252 = var247 + var247 + -611862821
var253 = ((arg228 & var251) ^ arg228) ^ var250
var254 = (var252 & (var251 ^ var246)) + var248
var255 = -71 | (var251 - (var253 - arg227))
var256 = var248 & var245
var257 = var256 - var256 & var256 + var249
var258 = var245 ^ var248
var259 = (1446724509 | arg228) ^ var257 - var258
var260 = var245 | var254 - var248
var261 = var250 + var245
var262 = var251 ^ var253
var263 = (var245 & var259) - var252 + var248
var264 = (84 ^ var252) ^ var253 ^ var250
var265 = var245 - (var248 - (var251 - var262))
if var265 < var246:
var266 = var255 + var255 | var253 & var264
else:
var266 = (var253 - var249 & -456) + var258
var267 = var258 & arg227 & var251 | var251
result = var258 | -591693613
return result
def func46(arg231, arg232):
var233 = (793 - arg232 | 523) + arg232
var234 = (arg231 & arg232) - arg232
var235 = arg232 ^ var233
var236 = arg232 + (arg232 & arg231) - arg231
var237 = var233 & (1957523330 ^ 977 + var233)
var238 = var235 - (var233 & var235) ^ 1688429538
var239 = ((var234 & arg232) + var233) & arg232
var240 = var234 + var238
var241 = var236 & var239
var242 = var241 + var237 | arg232 | var238
var243 = var233 + var235 - var242
result = (var236 | (arg232 & var237 & (var240 - var234 + var236 & var242 & -417)) & -922225234 + var237 + -171) ^ arg231
return result
def func42():
closure = [9]
def func41(arg224, arg225):
closure[0] += func43(arg224, arg225)
return closure[0]
func = func41
return func
var226 = func42()
def func26(arg192, arg193):
if arg193 < arg192:
var198 = class27()
else:
var198 = class29()
for var199 in (6 + i for i in range(5)):
var200 = var198.func28
var200(var199, var199)
var204 = func31(arg192, arg193)
if arg192 < arg192:
var209 = class33()
else:
var209 = class35()
for var210 in range(5):
var211 = var209.func34
var211(arg193, var204)
var216 = func37(arg193, var204)
var217 = var216 & 831 - arg192 + (var216 ^ ((-828 + var204 & (((-556 & (var204 | (arg193 + 213946737 - var216 | var204 & var216 ^ 957 - var204) | var216) & arg193) | -614308809) - arg193)) - var216) ^ arg193) & arg193
result = -722 & arg192 | ((-216 | (38 ^ var217 ^ (var204 + arg192 & arg192) ^ arg193 - var217)) | 678 - var217)
return result
def func37(arg212, arg213):
var214 = 0
for var215 in range(15):
var214 += var214 | arg213 - arg212
return var214
class class35(object):
def func34(self, arg207, arg208):
return 0
class class33(class35):
def func34(self, arg205, arg206):
return 0
class class29(object):
def func28(self, arg196, arg197):
result = (arg197 - ((arg197 + (-1853737721 ^ arg197) & -1) ^ arg196)) | arg197
return result
class class27(class29):
def func28(self, arg194, arg195):
result = -129390661 | arg195 - arg195
return result
def func23(arg166, arg167):
var171 = func24(arg166, arg167)
var172 = ((arg167 | arg167) + arg167) & arg166
var173 = -558 & (arg166 ^ var171) | -892092737
var174 = var171 + (arg166 & 869) | arg166
var175 = var173 + var173
var176 = ((arg166 ^ 397) | -66987574) - 491
var177 = arg166 | ((var175 + var173) | var173)
var178 = var175 + var173
if var176 < arg167:
var179 = (arg166 & var173 - 734628731) + var173
else:
var179 = (var176 | arg166 | var172) ^ -2115848754
var180 = var174 & var171 & var171 - arg167
var181 = var180 & (-1279410669 | var178 ^ var177)
var182 = var180 - var172
var183 = var174 & (var177 ^ (var174 | var175))
var184 = arg167 & var180
var185 = var176 ^ (arg167 & (var173 + var176))
var186 = var174 & var175 - var184 - 251
if var183 < var175:
var187 = var186 - (var173 - var185)
else:
var187 = var181 - var176
if arg166 < var173:
var188 = var176 ^ ((var171 & var175) | var176)
else:
var188 = (var186 + var186) + var181 + var171
var189 = ((var183 & var172) + var173) & var176
var190 = var181 | (var171 | var178)
var191 = 752 | (var184 | -207739077) & var183
result = var182 | var189 ^ var182 ^ (var184 + -724 ^ var177 | arg167) & var185 + var174 - var189 & var189 + -530683127
return result
def func14(arg112, arg113):
var117 = func15(arg113, arg112)
var147 = func17(arg113, var117)
def func22(arg148, arg149):
var150 = (-1087146933 + 1164014654) - arg112 ^ arg112
var151 = (arg113 - 1960645414) | arg113
var152 = -529920019 | var151 - arg148 - var150
var153 = arg149 & (arg112 ^ var151)
var154 = var117 | var152
if arg112 < var154:
var155 = (var147 + arg149 + var151) ^ arg149
else:
var155 = ((var152 - arg112) + 1430726206) ^ var154
var156 = arg149 - var117
var157 = var117 & arg148
var158 = 299 & var157 ^ (-1331392972 + arg112)
if arg113 < var117:
var159 = var151 - var150 ^ -617 | var156
else:
var159 = (var154 + var152 | arg148) & arg113
var160 = var147 - arg149 - arg113
var161 = var153 | (var147 + var158) & arg112
var162 = var150 | var152 | var150 + var153
result = (arg148 | arg113) ^ var162
return result
var163 = func22(var147, arg113)
var164 = arg112 | var117
var165 = arg112 & (-126926423 & var163)
result = arg113 + 912699397 + var165
return result
def func17(arg118, arg119):
var124 = func18(arg119, arg118)
var125 = func21()
if arg118 < var125:
var126 = arg118 ^ (-323 - var125) & arg118
else:
var126 = 768 | var124
var127 = arg118 + var125
var128 = var127 - -1782558051 + arg119 | 1317133696
var129 = -257385621 - -570
var130 = arg118 | arg119 + var125 ^ var129
var131 = arg118 ^ var128 | var128 | 210
var132 = var128 | var131
var133 = var130 & var129 & (var129 & var124)
var134 = (var124 | var133) & var130
var135 = arg119 ^ -2031051419 & var133 | var124
if var127 < var132:
var136 = -192653490 | (var125 | 19 & 740)
else:
var136 = var131 - arg119
var137 = var135 - var131 | var132 | var125
var138 = 1242342693 | var125
var139 = (467 ^ var125 & var128) - arg118
var140 = var137 & (var131 ^ var129 - var128)
var141 = var128 & (var129 - var129)
var142 = (var139 ^ var133) | var140 | arg118
var143 = var133 ^ var138 & var128
var144 = var129 - (var138 ^ var131 & var137)
var145 = var133 | (var144 & var135 + var133)
var146 = var124 & (var143 | var124 + var127)
result = (var133 ^ (var141 ^ var138)) | var124 - 811062646
return result
def func21():
func19()
result = len(range(35))
func20()
return result
def func20():
global len
del len
def func19():
global len
len = lambda x : -9
def func18(arg120, arg121):
var122 = 0
for var123 in range(19):
var122 += arg121 & arg121 - 5
return var122
def func7(arg66, arg67):
var107 = func8(arg66, arg67)
var108 = (-1424109015 + (((arg66 - var107) ^ 419 | ((((var107 - (833 | (arg66 - -758)) | (-1447654260 + 641) + arg67) & arg66 + (var107 - var107)) | var107 - var107) & 856927050)) + var107)) & -528 - 357796952 ^ arg66 - arg66
var109 = var108 & (195035338 ^ arg66 + arg67 ^ -626681852)
var110 = ((1277437794 - (arg66 | var108 - (270 + (-340 & var108 | var108 ^ (arg66 | var108) & var108))) - arg66 ^ arg66 - arg66) + var107) | arg67
var111 = var110 ^ var107 | arg67
result = var111 - var107
return result
def func8(arg68, arg69):
var70 = 0
for var106 in func9(arg69, var70):
var70 += arg69 ^ arg68 - var70
return var70
def func10(arg73, arg74):
var79 = func11(arg73, arg74)
var84 = func12(arg73, var79)
def func13(arg85, arg86):
var87 = arg85 | var84
var88 = var84 & 534440213 - 987 - (var84 + (arg74 + var84) - var79) - var87
var89 = (var88 & -1183127773) & arg74 + (var79 + var88)
result = (var79 ^ var88) | (var87 ^ arg85)
return result
var90 = func13(var84, var79)
var91 = -1783670203 + arg73
var92 = -1987296275 ^ arg73 | -1636295380 & ((-1502093551 ^ -316) - arg73 - -121 + 224 - var91)
var93 = arg73 + var90
var94 = (((var91 - var91 - arg73 - arg74 + var91 - (var93 + var93 + arg73) ^ var93) - var90 ^ ((var79 | (var79 + ((arg74 - (var84 & var92)) + var90)) & var84) - var90) & 577 | var91) + var91) - var84
result = arg74 ^ 198630352
return result
def func12(arg80, arg81):
var82 = 0
for var83 in range(14):
var82 += arg81 & (7 + arg81)
return var82
def func11(arg75, arg76):
var77 = 0
for var78 in range(17):
var77 += var77 & arg76 | arg75
return var77
def func9(arg71, arg72):
var95 = func10(-1670616381, -783)
yield var95
var96 = 600 ^ 105
yield var96
var97 = -687 + 981427770 & arg71
yield var97
var98 = (861736703 | var96 - arg72) - var96
yield var98
var99 = (-959 - arg71 & arg71) | arg72
yield var99
var100 = -103 & -549
yield var100
var101 = arg72 | ((arg71 - var98) & var97)
yield var101
var102 = (var101 ^ var97 & var97) & var98
yield var102
var103 = (var100 + var96 - var102) | -606
yield var103
var104 = ((76 ^ var102) + arg71) & var103
yield var104
var105 = var99 | var101
yield var105
def func1(arg1, arg2):
var15 = func2(arg2, arg1)
var20 = func4(arg1, var15)
def func5(arg21, arg22):
if arg22 < arg1:
var23 = -1597816210 - var15
else:
var23 = var20 | arg1
var24 = ((var20 + arg22) - var15) - arg1
var25 = 209843346 | arg22 - var24 + var24
var26 = (var15 | arg1) + (var20 & var15)
var27 = var24 | arg21
var28 = (arg21 & (var27 ^ var25)) + var26
var29 = -1006568382 & (var27 - (var24 & var28))
var30 = var28 & arg2 | var29 + arg22
var31 = arg1 - var29
var32 = ((arg21 ^ -413) + var15) | var28
result = arg1 & var29 & arg2
return result
var33 = func5(arg1, var15)
def func6(arg34, arg35):
var36 = ((arg35 & var15) ^ var20) - var33
var37 = (var36 ^ arg35) | var15 + arg1
var38 = var15 | arg35 | var33
if var37 < arg34:
var39 = var38 + var20
else:
var39 = 564639441 + (var15 ^ arg35) ^ arg35
var40 = 919589444 ^ arg34
var41 = var36 + arg2
var42 = arg1 ^ (1113512850 - var20 ^ var33)
var43 = (arg1 & var33) | var38 & var37
var44 = (var33 + var43) & 973708478 & var43
var45 = var33 + (var44 ^ var33) + var20
var46 = var45 & arg35 + var45
var47 = arg34 - var44 - var42 - var46
var48 = var47 + var38 + var44
var49 = (var36 + var38) & arg34 + arg1
var50 = 643871592 ^ var44
var51 = arg34 & arg34 ^ var46 | var33
var52 = var15 | var44 - var38
if var45 < var44:
var53 = (var42 - 657 & var50) ^ var52
else:
var53 = var44 - (var46 ^ var20) | arg1
var54 = 869 & arg34 | var36 & arg34
var55 = arg35 ^ -1856795692
var56 = var41 + var45
var57 = var47 | var52 & var42 - arg2
var58 = var56 - (var46 & -784)
var59 = ((var40 - var45) - 458) ^ 829
result = ((((arg34 - var36) | var48) & var50 - var58) & ((var46 ^ ((var56 + var46 & var51) + var51)) - var48)) ^ arg35
return result
var60 = func6(var33, arg1)
var61 = var15 ^ var15
var62 = arg1 ^ arg1
var63 = var33 & ((1939695419 & ((var60 & var20 + var33 | var20 & var33) - (-1674400426 ^ 964630041))) | ((((((var60 & arg2) - arg2) - var20) - -2125333711 - ((var20 | 2045898726 + var33) + var61)) - var15 - var62) ^ -279)) ^ arg1
var64 = (((var63 | -745) | arg2 & arg2) ^ var61 + var62) ^ var15 | ((var15 ^ var60) | var61 + ((var63 | var63 & arg2 | var61) ^ arg1 & -1907774184 ^ var33 | var61) ^ var62 | var60 ^ arg1 & arg1)
var65 = var60 + (1330880070 ^ var33) ^ var20 + (-882 & var33)
result = var61 + (var64 & arg1) - var62
return result
def func4(arg16, arg17):
var18 = 0
for var19 in range(46):
var18 += arg16 & -2 ^ arg17
return var18
def func2(arg3, arg4):
var9 = func3(arg3, arg4)
var10 = 1296270875 | -405816451
var11 = ((arg3 | (((1605404532 & arg4 + (var10 - arg4 - var9 & arg3)) ^ -1231414641) | 354) & -782527097) ^ (-44 ^ ((var9 & var9) | -1018372679) - -782 & arg3) + (var9 | var10 ^ arg3) | var9 & -2012637171) ^ 1221212954
var12 = arg4 - (arg4 | -113227876)
var13 = (662 + 1870126003) - -1612965323
var14 = -657 & arg3
result = var9 ^ (((var10 + var12) + ((var12 - var13) + (var10 ^ var9)) - (arg4 - -730) | var14) & 728) ^ var11
return result
def func3(arg5, arg6):
var7 = 0
for var8 in range(32):
if arg6 < arg5:
var7 += var7 - var7 | arg5
else:
var7 += arg6 + arg6
return var7
def func15(arg114, arg115):
def func16(acc, rest):
var116 = (-8 & -10) + 9
if acc == 0:
return var116
else:
result = func16(acc - 1, var116)
return result
result = func16(10, 0)
return result
def func24(arg168, arg169):
closure = [0]
def func25(acc, rest):
var170 = (4 & (-7 | (rest & -7)) ^ -7) ^ -7 | -7
closure[0] += var170
if acc == 0:
return var170
else:
result = func25(acc - 1, var170)
return result
result = func25(10, 0)
return result
def func31(arg201, arg202):
def func32(acc, rest):
var203 = -10 | -2 ^ -8
if acc == 0:
return var203
else:
result = func32(acc - 1, var203)
return result
result = func32(10, 0)
return result
def func39(arg220, arg221):
def func40(acc, rest):
var222 = 0 ^ acc - -10
if acc == 0:
return var222
else:
result = func40(acc - 1, var222)
return result
result = func40(10, 0)
return result
def func44(arg229, arg230):
def func45(acc, rest):
var244 = func46(-10, rest)
if acc == 0:
return var244
else:
result = func45(acc - 1, var244)
return result
result = func45(10, 0)
return result
if __name__ == "__main__":
print('prog_size: 5')
print('func_number: 7')
print('arg_number: 66')
for i in range(25000):
x = 5
x = func1(x, i)
print(x, end='')
print('prog_size: 5')
print('func_number: 14')
print('arg_number: 112')
for i in range(25000):
x = 5
x = func7(x, i)
print(x, end='')
print('prog_size: 5')
print('func_number: 23')
print('arg_number: 166')
for i in range(25000):
x = 5
x = func14(x, i)
print(x, end='')
print('prog_size: 1')
print('func_number: 26')
print('arg_number: 192')
for i in range(25000):
x = 5
x = func23(x, i)
print(x, end='')
print('prog_size: 5')
print('func_number: 38')
print('arg_number: 218')
for i in range(25000):
x = 5
x = func26(x, i)
print(x, end='')
print('prog_size: 5')
print('func_number: 48')
print('arg_number: 306')
for i in range(25000):
x = 5
x = func38(x, i)
print(x, end='') | UTF-8 | Python | false | false | 18,305 | py | 25,849 | 5bf281fb-178c-493c-8c7a-422ac4bf1fe0.py | 25,848 | 0.568479 | 0.316963 | 0 | 497 | 35.832998 | 233 |
shabana-123/github | 833,223,668,007 | 721aa46d08d29f3951622d8b166416d63afc254e | 9c5a7f1c65fafc89d59ceff76c400cf424bb17e0 | /example2.py | 1f671a8862a045f353928eeb285299888fdfd9d0 | []
| no_license | https://github.com/shabana-123/github | 989819eeeb7c36aeca23c6c41555fb75e5decf8f | c51c8dd0021f050ccc61e92b322a9e2a5606eeb2 | refs/heads/master | 2023-02-26T07:25:49.461072 | 2021-02-01T10:16:18 | 2021-02-01T10:16:18 | 334,840,090 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | x=input("enter input")
if x=="yes":
print("eat pizza")
else:
print("drink")
| UTF-8 | Python | false | false | 78 | py | 3 | example2.py | 1 | 0.628205 | 0.628205 | 0 | 5 | 14.6 | 22 |
wilsonhuangnice/AE401wilson | 627,065,256,244 | 77ab4e185b2640716efebb66ed70f9fd547a8337 | bb8a50d51cfb1817777f717c46d28ff902a619f3 | /1017.py | 06d916f2b94ce5e6b3fbf28f5177e4b3f896ad3c | []
| no_license | https://github.com/wilsonhuangnice/AE401wilson | 75708fbee65b7981429fdf555a195750fdb9455b | f0eb82ecd485111174ea713a63a74504f39ecbb2 | refs/heads/master | 2023-02-03T16:13:28.655717 | 2020-12-26T03:54:46 | 2020-12-26T03:54:46 | 296,778,906 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 17 10:20:15 2020
@author: michael
"""
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
x,y,z = mc.player.getTilePos()
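# Block ID 57 is a block of diamond; stack a 13-block diamond column above the player.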
for i in range(1, 14):
    mc.setBlock(x, y + i, z, 57)
x,y,z = mc.player.getTilePos()
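# Surround the player's position with a 3x3 ring of diamond blocks at foot level.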
for dx in (-1, 0, 1):
    for dz in (-1, 0, 1):
        if dx != 0 or dz != 0:
            mc.setBlock(x + dx, y, z + dz, 57)
| UTF-8 | Python | false | false | 874 | py | 12 | 1017.py | 12 | 0.653318 | 0.556064 | 0 | 66 | 12.19697 | 36 |
emilykjensen/CSCI5352_Project | 10,110,353,048,909 | 725a483524e73745ab54c34568d7e4ae840c8b60 | c698a4452e4ced2daa51854c2a8b678eb5570bfd | /Code/video-processing.py | 55d9445252d161c9057d45a47e654ea28e10a679 | []
| no_license | https://github.com/emilykjensen/CSCI5352_Project | ab97bfce3e76e87e1152d52500517fe9c5ffa13a | 21d9ca6e54b097967ce1b8f338d5d07f99007c91 | refs/heads/master | 2020-11-24T10:26:21.431767 | 2019-12-16T19:43:12 | 2019-12-16T19:43:12 | 228,106,994 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import random
from os import listdir
from networkx.algorithms import bipartite
##############################
### Combine 2-week files ###
##############################
data_dir = "/your/directory/here/"
files = listdir(data_dir)
dfs = []
for f in files:
df = pd.read_csv(data_dir + f)
dfs.append(df)
combined = pd.concat(dfs)
combined.to_csv(data_dir + 'video-data.csv')
######################
### Get datasets ###
######################
combined = pd.read_csv(data_dir+'video-data.csv')
sessions, session_counts = np.unique(combined['session_id'].values, return_counts = True)
students, student_counts = np.unique(combined['useraccount_id'].values, return_counts = True)
videos, video_counts = np.unique(combined['video_id'].values, return_counts = True)
print('There are {} unique sessions, {} students, and {} videos'.format(len(sessions),len(students),len(videos)))
pd.Series(session_counts).describe()
pd.Series(student_counts).describe()
pd.Series(video_counts).describe()
#########################
### Build a network ###
#########################
# One group is student IDs
# The other group is video IDs
G = nx.Graph()
G.add_nodes_from(students, bipartite=0)
G.add_nodes_from(videos, bipartite=1)
G.add_edges_from(list(zip(combined['useraccount_id'].values, combined['video_id'].values)))
cluster_all = bipartite.average_clustering(G)
density_all = bipartite.density(G)
# Get degrees for unprojected graph
student_degree = G.degree(students)
list_student_degree = [val for (node, val) in student_degree]
plt.hist(list_student_degree)
plt.xlabel('Number of Unique Videos')
plt.ylabel('Number of Students')
plt.show()
pd.Series(list_student_degree).describe()
video_degree = G.degree(videos)
list_video_degree = [val for (node, val) in video_degree]
plt.hist(list_video_degree)
plt.xlabel('Number of Unique Accessing Students')
plt.ylabel('Number of Videos')
plt.show()
pd.Series(list_video_degree).describe()
# Attempt at student projection
sample_rate = 0.3
n_sampled = int(sample_rate * len(students))
sampled_students = random.sample(list(students), n_sampled)
student_projection = bipartite.projected_graph(G,sampled_students) # MEMORY ERROR HERE
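# The one-mode projection links every pair of students who share a video, so the
# edge count can grow quadratically with co-viewership; sampling keeps it tractable.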
# Video projection
video_projection = bipartite.projected_graph(G,videos)
video_proj_degree = video_projection.degree()
list_video_proj_degree = [val for (node, val) in video_proj_degree]
plt.hist(list_video_proj_degree)
plt.xlabel('Number of Neighbors')
plt.ylabel('Number of Videos')
plt.show()
pd.Series(list_video_proj_degree).describe()
nx.density(video_projection)
# Degree centrality
deg = nx.degree_centrality(video_projection)
deg_list = list(deg.values())
pd.Series(deg_list).describe()
plt.hist(deg_list)
plt.xlabel('Degree Centrality')
plt.ylabel('Number of videos')
plt.show()
# Eigenvector centrality
eig = nx.eigenvector_centrality(video_projection)
eig_list = list(eig.values())
pd.Series(eig_list).describe()
plt.hist(eig_list)
plt.xlabel('Eigenvector Centrality')
plt.ylabel('Number of videos')
plt.show()
# Closeness centrality
close = nx.closeness_centrality(video_projection)
close_list = list(close.values())
pd.Series(close_list).describe()
plt.hist(close_list)
plt.xlabel('Closeness Centrality')
plt.ylabel('Number of videos')
plt.show()
# Betweenness centrality
between = nx.betweenness_centrality(video_projection)
between_list = list(between.values())
pd.Series(between_list).describe()
plt.hist(between_list)
plt.xlabel('Betweenness Centrality')
plt.ylabel('Number of videos')
plt.show()
# Triangles and clustering coefficient
tri = nx.triangles(video_projection)
tri_list = list(tri.values())
print("There are {} triangles".format(np.sum(tri_list)/3))
cluster = nx.clustering(video_projection)
cluster_list = list(cluster.values())
pd.Series(cluster_list).describe()
plt.hist(cluster_list,bins=15)
plt.xlabel('Clustering Coefficient')
plt.ylabel('Number of videos')
plt.show()
# Diameter
print("The diameter is {}".format(nx.diameter(video_projection)))
print("Because minimum degree is {}".format(min(deg_list)))
###########################
### Changes over time ###
###########################
combined['ts_created'] = pd.to_datetime(combined['ts_created'])
combined.index = combined['ts_created']
combined['month'] = combined.index.month
combined_sorted = combined.sort_values(by=['month'])
video_by_month = combined.groupby(['video_id',combined.index.month]).count()
student_by_month = combined.groupby(['useraccount_id',combined.index.month]).count().drop(columns=['ts_created'])
student_by_month = student_by_month.reset_index(level='ts_created')
# Month of first view per video
video_first = combined_sorted.drop_duplicates(subset='video_id')['month']
plt.hist(video_first.values)
plt.xlabel('Month of first view')
plt.ylabel('Number of Videos')
plt.show()
# Average month view per video
video_avg_month = combined.groupby(['video_id']).mean()['month']
plt.hist(video_avg_month.values,bins=25)
plt.xlabel('Average Month')
plt.ylabel('Number of Videos')
plt.show()
# Count of video views per month
plt.hist(combined.index.month.values)
plt.xlabel('Month')
plt.ylabel('Number of video views')
plt.show()
month_counts = combined['video_id'].groupby(combined.index.month).count()
pd.Series(month_counts).describe()
# Average number of student views per month
avgs = []
for i in range(1,7):
filtered = student_by_month.loc[student_by_month['ts_created'] == i]
avgs.append(np.mean(filtered['session_id'].values))
plt.plot(range(1,7),avgs)
plt.ylim((0,8))
plt.xlabel('Month')
plt.ylabel('Average video views per student')
plt.show()
| UTF-8 | Python | false | false | 5,861 | py | 4 | video-processing.py | 2 | 0.690667 | 0.687937 | 0 | 186 | 29.510753 | 113 |
yesito/Valuation | 16,922,171,152,137 | 660bf99051b4fa0eaae2672448a991f8d958c0d9 | 456cbffd38c331da14df0555a6ab0a0e990e2e08 | /conversor_pdf_df.py | 4ec36f3bd9b8ba5d4c34bbbd50258a9cbc2090e6 | []
| no_license | https://github.com/yesito/Valuation | ebb9048b2449b4bd0048c820179ee804d8f47583 | 5f289bad96b2f57dbc9ac15c852b32b03110c7bd | refs/heads/main | 2023-05-31T23:30:36.302775 | 2021-06-14T18:17:00 | 2021-06-14T18:17:00 | 376,911,680 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 20 13:20:54 2020
@author: lucab
"""
import pdftotext
from datetime import datetime
import pandas as pd
import sys
pd.set_option('display.max_columns', None) # or 1000
pd.set_option('display.max_rows', None) # or 1000
#print("Welcome! This app is intended to get financial data from companies in trimestral relatories (PDF)")
#print("and tranform this information into Data Frames, being able to export them as CSV files.")
#print("\n")
#print("To start, please:")
def main():
filename = "ITRweg1T2019" #insert only the file name whithout the extension
filename = filename.strip()
filename += ".pdf"
folderpath = "C:\\Users\\lucab\\OneDrive\\A&M\\DemonstrativosFinanceiros\\PDF\\weg2019" #insert the folder path where the pdf is:
folderpath = folderpath.strip()
pages = [6] #input the number of the pages in which the tablesheet is (list):
filepath = folderpath + "\\" + filename
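    # Pipeline: read the selected PDF pages -> split lines into tokens -> keep the
    # numbered table rows -> extract labels, codes, periods and values -> build a
    # dict -> convert to a pandas DataFrame (optionally exported as CSV).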
listofpagetxt = read(filepath, pages)
#print(listofpagetxt)
pagelsts = txtorganizerinlst(listofpagetxt)
#print(pagelsts)
table = cleanheaderandfooter(pagelsts)
print(table)
perioddirty = perio(pagelsts)
label = extractlabel(table)
code = extractcode(table)
period = cleanperiod(perioddirty)
values = extractvalues(table)
print(values)
#print(len(values),len(code),len(label))
dictionary = createdict(label, code, period, values)
#print(dictionary)
dataframe = createdataframe(dictionary)
print (dataframe)
flag = True
while flag == True:
answer = input("do you wish to export your Data in a CSV format? (Answer Yes or No) ") .lower()
answer = answer.strip()
if answer == "yes":
exportcsv(dataframe)
print("file sucssessfully exported!")
flag = False
elif answer == "no":
flag = False
else:
print("invalid input")
flag = True
flag = True
while flag == True:
answer2 = input("Do you wish to make another Data Frame? (Answer Yes or No) ") .lower()
answer2 = answer2.strip()
if answer2 == "yes":
flag = False
main()
elif answer2 == "no":
while flag == True:
answer3 = input("Do you wish to exit? (Answer Yes or No) ") .lower()
answer3 = answer3.strip()
if answer3 == "yes":
flag == False
sys.exit()
elif answer3 == "no":
flag == False
main()
else:
print("invalid input")
flag == True
else:
print("invalid input")
flag = True
def read(filepath,pages):
listofpagen = []
listofpagetxt = []
with open (filepath, "rb") as infile:
pdfread = pdftotext.PDF(infile)
for i in range (0,len(pages),1):
page = pages[i]
listofpagen += [page]
for i in range (0,len(listofpagen),1):
pagetext = pdfread[listofpagen[i]-1]
listofpagetxt += [pagetext.split("\n")]
return listofpagetxt
def txtorganizerinlst(listofpagetxt):
pagelsts = []
for i in range (0,len(listofpagetxt),1):
pagelnlst = []
for j in range(0,len(listofpagetxt[i]),1):
linelst = listofpagetxt[i][j].split()
if linelst != []:
pagelnlst += [linelst]
pagelsts += pagelnlst
return pagelsts
def cleanheaderandfooter(pagelsts):
table = []
for i in range (0,len(pagelsts),1):
try:
new = ""
test = pagelsts[i][0]
for j in range(0,len(test),1):
if test[j] != ".":
new += test [j]
int(new)
except:
pass
else:
table += [pagelsts[i]]
return table
def perio(pagelsts):
period = []
for i in range (0,len(pagelsts),1):
for j in range(0,len(pagelsts[i]),1):
test = ""
if len(pagelsts[i][j]) == 10:
for l in range (0,len(pagelsts[i][j]),1):
if pagelsts[i][j][l] != "/":
test += pagelsts[i][j][l]
try:
int(test)
except:
pass
else:
if pagelsts[i] not in period:
period += [pagelsts[i]]
return period
def extractlabel(table):
x = 0
for j in range(len(table[1])-1,-1,-1):
valued = table[1][j]
valuelst = valued.split(".")
value = ""
for h in range(0,len(valuelst),1):
value += str(valuelst[h])
try:
value = int(value)
except:
break
else:
x += 1
lst = []
for i in range (0, len(table),1):
label = ""
for j in range(1,len(table[i])-x,1):
label += " " + table[i][j]
lst += [label.strip()]
return lst
def extractcode(table):
lst = []
for i in range(0,len(table),1):
lst += [table[i][0]]
return lst
def cleanperiod(period):
lst = []
for i in range(0,len(period),1):
for j in range (0,len(period[i]),1):
test = ""
for l in range (0, len(period[i][j]),1):
if period[i][j][l] != "/":
test += period[i][j][l]
try:
int(test)
except:
pass
else:
if period[i][j] not in lst and len(period[i][j]) == 10:
lst += [period[i][j]]
lstdt = []
for i in range(0,len(lst),1):
date = lst[i]
date_object = datetime.strptime(date, "%d/%m/%Y")
lstdt += [date_object]
lstdts = sorted(lstdt)
lst = []
for i in range(0,len(lstdts),1):
date = lstdts[i]
new = date.strftime("%d/%m/%Y")
lst += [new]
if len(lst) == 2:
return [lst[1],lst[0]]
elif len(lst) == 4:
return [lst[3],lst[1]]
else:
#print ("No period could be identified correctly")
return None
def extractvalues(table):
values = []
for i in range (0, len(table),1):
nested = []
nestedinv = []
for j in range(len(table[i])-1,-1,-1):
valued = table[i][j]
valuelst = valued.split(".")
value = ""
for h in range(0,len(valuelst),1):
value += str(valuelst[h])
try:
value = int(value)
except:
valued = table[i][j]
valuelst = valued.split(",")
value = ""
h = 0
for h in range(0,len(valuelst),1):
value += str(valuelst[h])
try:
value = int(value)
except:
break
else:
nested += [table[i][j]]
else:
nested += [table[i][j]]
for i in range(len(nested)-1,-1,-1):
nestedinv += [nested[i]]
values += [nestedinv]
return values
def createdict(label, code, period, values):
dictionary2 = {}
if period != None:
for i in range(0,len(period),1):
dictionary1 = {}
for j in range(0, len(label),1):
try:
dictionary1[code[j],label[j]] = values[j][i]
except:
dictionary1[code[j],label[j]] = None
dictionary2[period[i]] = dictionary1
return dictionary2
else:
dictionary1 = {}
for i in range(0,len(values[1]),1):
dictionary1 = {}
for j in range(0,len(label),1):
try:
dictionary1[code[j],label[j]] = values[j][i]
except:
dictionary1[code[j],label[j]] = None
dictionary2[i] = dictionary1
return dictionary2
def createdataframe(dictionary):
frame = pd.DataFrame(dictionary)
return frame
def exportcsv(dataframe):
folderpath = input("where do you want to export your .csv file (enter a folder path): ")
folderpath = folderpath.strip()
filename = input("how do you want to name your file? ")
filename = filename.strip()
filename += ".csv"
filepath = folderpath + "\\" + filename
csv = dataframe.to_csv(path_or_buf = filepath, encoding = "utf-8" )
return csv
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 9,275 | py | 5 | conversor_pdf_df.py | 4 | 0.466415 | 0.451968 | 0 | 317 | 27.246057 | 133 |
ertyseidohl/python-quiz | 8,890,582,318,232 | 285ed12566a3b269e26036ab488e3397d1bafa57 | 8e7a67f947c3b3c538e94f4159082d7c53856de9 | /examples/1.1.3.py | 0abf771dcc659093e50cecd764ef04e355b7880e | []
| no_license | https://github.com/ertyseidohl/python-quiz | cac5d485c082dae5af668cca4324b27381d491be | 8b9a7dfce077350402ea510869b7fda444f2970c | refs/heads/master | 2021-01-10T11:50:10.277444 | 2016-03-01T07:05:13 | 2016-03-01T07:05:13 | 52,807,932 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a = 0
while (a < 10):
a = a + 1
print(a) | UTF-8 | Python | false | false | 41 | py | 40 | 1.1.3.py | 40 | 0.463415 | 0.365854 | 0 | 4 | 9.5 | 15 |
Rossbin/justapi | 17,179,869,219,505 | d76ef1f8f11bb25982d64f9cf7534eea21e8d179 | a47215cb2cadf3e2bb97ff0ff4b737490538f3ea | /scripts/celery_task/task1.py | a954b6f1cb4d11a9f0d6110387e355d8e3518719 | []
| no_license | https://github.com/Rossbin/justapi | a910f5e1f6995893e53e8eac686632a27620a64b | 809c8793406245cf13298a7b2f52593590c37c70 | refs/heads/master | 2023-04-18T11:50:17.642279 | 2021-05-04T08:44:28 | 2021-05-04T08:44:28 | 350,808,584 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .celery import app
@app.task
def add(x,y):
print(x,y)
return x+y
# @app.task
# def updata_banner():
#
# # Call the Django project from inside a standalone script
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "untitled15.settings")
# import django
# django.setup()
#
# from app01 import models
# from django.core.cache import cache
# books = models.Book.objects.all()
# print(books)
| UTF-8 | Python | false | false | 411 | py | 64 | task1.py | 63 | 0.635443 | 0.625316 | 0 | 20 | 18.65 | 76 |
punish4424/django-rest-framework-boilerplate | 4,587,025,095,658 | fc1786083c47b3b4e966ea203e6454b7d96c5c03 | 3ebbdacadaf648255e1d8c6a32728f063da605ab | /{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/apps/utils/serializers.py | a8ae73bd57131d3812847df9ab1a433d9fed1ccf | [
"BSD-2-Clause-Views",
"BSD-3-Clause"
]
| permissive | https://github.com/punish4424/django-rest-framework-boilerplate | 223daa483759d0a7edda95426cfc90312c1ed1cd | 618c6f66d1c4c4deaf50e4e996107fb65c836589 | refs/heads/main | 2023-05-09T00:34:23.994184 | 2021-06-05T22:04:23 | 2021-06-05T22:04:23 | 374,174,241 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import get_user_model
from rest_framework import serializers
User = get_user_model()
class CustomUserSerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
super(CustomUserSerializer, self).__init__(*args, **kwargs)
if self.context and self.context['request'].method in ["PUT", "PATCH"]:
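            # email and password may only be set when the account is created,
            # so drop those fields from update (PUT/PATCH) requests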
self.fields.pop('email')
self.fields.pop('password')
password = serializers.CharField(style={'input_type': 'password'}, trim_whitespace=False, write_only=True)
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
return user
def to_representation(self, instance):
rep = super().to_representation(instance)
rep['email'] = instance.email
return rep
| UTF-8 | Python | false | false | 816 | py | 14 | serializers.py | 8 | 0.664216 | 0.664216 | 0 | 23 | 34.478261 | 110 |
ausaki/data_structures_and_algorithms | 10,110,353,048,340 | 1d34044209e34f29e7c9f67fca9b32070d7fd4d7 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/unique-binary-search-trees-ii/147199063.py | dd1e188a3b2577f34995a65b4425d3fbb4f6bafb | []
| no_license | https://github.com/ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # title: unique-binary-search-trees-ii
# detail: https://leetcode.com/submissions/detail/147199063/
# datetime: Tue Mar 27 16:59:18 2018
# runtime: 104 ms
# memory: N/A
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def copyTree(self, root):
if root is None:
return None
parent = TreeNode(root.val)
parent.left = self.copyTree(root.left)
parent.right = self.copyTree(root.right)
return parent
def generateTrees(self, n):
"""
:type n: int
:rtype: List[TreeNode]
"""
        # Dynamic programming: build all trees over 1..n from the trees over 1..n-1,
        # either by making n the new root or by inserting n along the right spine.
if n == 0:
return []
if n == 1:
return [TreeNode(1)]
new_trees = []
trees = self.generateTrees(n - 1)
node_n = TreeNode(n)
# node_n as the new root node
# node_n.left = old_root
for tree in trees:
node_n = TreeNode(n)
node_n.left = tree
new_trees.append(node_n)
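        # Insert n at every position along the right spine of each existing tree;
        # the tree is copied first so already-collected trees stay untouched.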
for tree in trees:
current_node = tree
while current_node:
root = self.copyTree(tree)
node = root
while node.val != current_node.val:
node = node.right
node_n = TreeNode(n)
right_node = node.right
node.right = node_n
node_n.left = right_node
new_trees.append(root)
current_node = right_node
return new_trees
| UTF-8 | Python | false | false | 1,752 | py | 2,030 | 147199063.py | 1,912 | 0.480023 | 0.46347 | 0 | 61 | 27.442623 | 60 |
ashleyconnor/leetcode | 13,589,276,531,731 | 6f2e105b2e5e01cdc82d3fc4c6ff20e9ce4576cf | 8945f7e126b88b036a375985365979c3ba611973 | /src/two_sum/test_solution.py | d0da5b55ec6f35483911d24dd8b9c5bc4e9d1162 | []
| no_license | https://github.com/ashleyconnor/leetcode | 11a9fb7ccaa45f7f69daef0692c30f55ff04102d | 1f2a9ce6c2eff85536537207db9cb322868ef1fd | refs/heads/master | 2021-06-26T11:43:39.477836 | 2021-01-09T18:39:06 | 2021-01-09T18:39:06 | 201,158,333 | 0 | 0 | null | false | 2021-01-09T18:39:07 | 2019-08-08T01:58:32 | 2021-01-09T18:26:09 | 2021-01-09T18:39:07 | 28 | 0 | 0 | 0 | Python | false | false | import pytest
from .solution import Solution
class TestSolution:
@pytest.mark.parametrize(
"numbers,target,expected",
[([2, 7, 11, 15], 9, (0, 1)), ([5, 4, 3, 10, 7], 12, (0, 4))],
)
def test_two_sum(self, numbers, target, expected):
assert Solution().two_sum(numbers, target) == expected
| UTF-8 | Python | false | false | 328 | py | 23 | test_solution.py | 15 | 0.594512 | 0.536585 | 0 | 12 | 26.333333 | 70 |
sankalp30/shuford_costing | 7,327,214,209,403 | 6c163d40d2eb9276a2ffe2ebed842e5c36b91ee4 | 49a458025f3c3afd7d75448f72ebaf97782a3496 | /hs_allstds.py | df2a257d1871e3636d254798c3854bc561c4d29f | []
| no_license | https://github.com/sankalp30/shuford_costing | 3986fae1ff2ce94e494919db0c1aaa4e8ad86003 | 707968d38b30c1ce19460b55ab3cbafd3522f4d4 | refs/heads/master | 2022-12-22T13:17:56.747613 | 2020-09-29T13:01:45 | 2020-09-29T13:01:45 | 299,617,881 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 13:19:42 2020
@author: SankalpMishra
"""
import numpy as np
import pandas as pd
import xlwings as xw
import time
#%%
start_time = time.time()
path = r'\\shufordyarnsllc.local\SYDFS\UserFiles$\SankalpMishra\Desktop\Sankalp_all\cost_assistanceproject_8_19_2019\Hickory Spinners'
path = path.replace("\\", "/")
col_cno = ['cno', 'yno', 'ply', 'blend', 'tr', 'inv_description', 'case', 'units', 'tare', 'customer']
#df_cno = pd.read_excel(path + 'Cheat Sheet 7-19-20' + '.xlsx', sheet_name = 'ALL YARNS' , usecols = [0,1,2,3,4,5,6,7,8,9])
#df_cno.columns = col_cno
#df_cno['cno'] = df_cno['cno'].astype(str)
#%%
#df_select = df_cno[df_cno['cno'].isin(list(df_input['cno']))]
#%%not used
def proc_pkgwt(inv_desc):
inv = inv_desc.lower()
inv = inv.replace('"', '')
wax_tokens = ['wax', 'wx']
bag_tokens = ['bag', 'bg']
cond_tokens = ['cond', 'cd']
l_pkgtype = ['351', '557', '5406rn', 'owt', 'pt', 'pencil tube', 'penciltube', '190', \
'190dt', 'cs', '150', '150dt', 'dt', 'OWT5406RN', 'mf', '190mf', '150mf']
l_inv = inv.split(' ')
for token in l_inv:
if 'x' in list(token) and 'w' not in list(token) and len(token)> 1: #wax should contain a w with x but dimension token will only have x
pkg_len = token.split('x')[0]
pkg_dia = token.split('x')[1]
if token == 'wax' or token == 'wx':
pass
return l_inv, pkg_len, pkg_dia
#%% Not used
def twist_breakdown(twist):
try:
tw_sp = int(''.join(list(str(twist))[:2]))# don't divide by 10. It is done in integrated standards sheet
except:
tw_sp = 0
try:
tw_tw = int(''.join(list(str(twist))[2:4]))
if tw_tw >15:
tw_tw = tw_tw/10 # this division by 10 is not done in the standards sheet
else:
pass
except:
tw_tw = 0
try:
        if len(str(twist)) > 4:
            tw_cable = int(''.join(list(str(twist))[4:6]))
            if tw_cable > 15:
                tw_cable = tw_cable/10 # this division by 10 is not done in the standards sheet
else:
pass
else:
tw_cable = 0
except:
tw_cable = 0
return tw_sp, tw_tw, tw_cable
#%%
def putup_breakdown(putup):
ls_dt = ['190', '150']
ls_tube = ['owt']
ls_cone = ['557', '351']
pw_tube = ['pwt', 'pw']
putup = str(putup).lower()[:3]
if putup in ls_dt:
return 'dyetube'
elif putup in ls_tube:
return 'tube'
elif putup in ls_cone:
return 'cone'
elif putup in pw_tube:
return 'pwt'
else:
return 'default'
def blend_extract(cno):
try:
return str(cno)[:3]
except:
return 'default'
#%%
df_input = pd.read_excel(path + '/mar_2020/mar20_hs_pp' + '.xlsx', sheet_name= 'hs_pp')
df_input['cno'] = df_input['Item Number'].map(lambda x: str(x)[:-6])
df_input['cno'] = df_input['cno'].map(lambda x: str(x)[-6:])
#%%
df_cno = pd.read_csv(path+ '/mar_2020/dfall_hs_mar20.csv')
df_cno['cno'] = df_cno['cno_itemnum'].map(lambda x: str(x)[-6:])
#%%
wb = xw.Book(r'//shufordyarnsllc.local/SYDFS/UserFiles$/SankalpMishra/Desktop/Sankalp_all/standards_costing_program/HS/tbl/HS_Standards_all.xlsm')
sheet = wb.sheets['Main']
app = wb.app
#blend_summary_mcro = app.macro("carding_drawing_roving_summary") - example from dudley shoals plant script
summary_macro = app.macro("HS_summary")
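# For each construction number below, the script fills the input cells of the 'Main'
# sheet and then runs the workbook's HS_summary VBA macro (assumed to write the
# standards summary for that item back into the workbook).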
#%%
purchased_list = ['ct1', 'ct2', 'pyn']
ls_notfound = []
for item in list(df_input['cno']):
try:
print(item)
df_current = df_cno[df_cno['cno'] == item]
cno_item = df_current['cno_itemnum'].values[0]
cno_yno = float(str(cno_item)[4:8])/100
cno_ply = df_current['ply'].values[0]
tw_sp = df_current['tw_sp'].values[0]
tw_tw = df_current['tw_tw'].values[0]
cno_wax = df_current['cno_wax'].values[0]
cno_bag = df_current['cno_bag'].values[0]
cno_cond = df_current['cno_cond'].values[0]
cno_doubspeed = df_current['cno_doubspeed'].values[0]
cno_twrpm = df_current['cno_twistrpm'].values[0]
cno_spinrpm = df_current['cno_spinspeed'].values[0]
cno_pkg = df_current['cno_pkg'].values[0]
cno_putup = df_current['cno_putup'].values[0]
cno_putup = putup_breakdown(cno_putup)
abbrev = blend_extract(cno_item)
cno_oil = df_current['cno_oil'].values[0]
print('base variables extracted')
print(cno_item, cno_yno, cno_ply, tw_sp, tw_tw, cno_wax, cno_bag, cno_cond, cno_doubspeed, \
cno_twrpm, cno_spinrpm, cno_pkg, cno_putup, abbrev, cno_oil)
#%%Basic setup
sheet.range("A4").value = cno_item
sheet.range("B4").value = abbrev
sheet.range("C4").value = tw_sp
sheet.range("C5").value = tw_tw
sheet.range("E4").value = cno_yno
sheet.range("E5").value = cno_pkg
sheet.range("G4").value = cno_ply
if str(cno_wax).lower() == 'wx':
sheet.range("J4").value = 'Yes'
else:
sheet.range("J4").value = 'No'
if str(cno_bag).lower() == 'bg':
sheet.range("K4").value = 'Yes'
else:
sheet.range("K4").value = 'No'
pkg_wt = sheet.range("H4").value
if cno_ply > 1:
creel_wt = pkg_wt/1.8
# not included creel per crate variable as it is not used in HS doubling/ twisting sheets
else:
creel_wt = pkg_wt
sheet.range("J6").value = creel_wt # possible error point if a construction number is skipped!
print('basic sheet setup done')
#%%carding setup
sheet.range("A9").value = 'Yes'
sheet.range("B10").value = abbrev
print('carding setup complete')
#%% Drawing setup
sheet.range("D9").value = "Yes"
sheet.range("E10").value = abbrev
print('drawing setup complete')
#%%ACO8 Spin setup
sheet.range("G9").value = "Yes"
#sheet.range("H10").value == ___ #skipping blend match for aco as it doesn't affect output
if cno_ply == 1:
sheet.range("H14").value = 'Sales'
sheet.range("H21").value = 'Wood Pallet TP-Sales'
else:
sheet.range("H14").value = 'Doubler'
sheet.range("H21").value = 'Crate'
if cno_putup == 'dyetube':
sheet.range("H20").value = 'Case-DT'
sheet.range("H22").value = 'Yes'
else:
sheet.range("H20").value = 'Crate-PL' # not much difference between case-pa and crate-pl
sheet.range("H22").value = 'No'
sheet.range("H15").value = cno_spinrpm
sheet.range("H19").value = 0.9
aco_efftemp = sheet.range("H27").value
sheet.range("H19").value = aco_efftemp
print('aco8 setup complete')
#%% Doubler setup
if cno_ply>1:
sheet.range("A20").value = 'Yes'
else:
sheet.range("A20").value = 'No'
sheet.range("B24").value = cno_doubspeed
rcom_machine = sheet.range("C27").value
sheet.range("B27").value = rcom_machine
print('doubler setup complete')
#%% Twisting setup
if cno_ply > 1:
sheet.range("A37").value = "Yes"
else:
sheet.range("A37").value = "No"
# recommended machine --> sheet.range("B42").value = rcom_machine
sheet.range("B43").value = cno_twrpm
if cno_putup == 'dyetube':
sheet.range("B46").value = "Yes" # tieoff
sheet.range("B51").value = 'No' # label
else:
sheet.range("B46").value = "No"
sheet.range("B51").value = 'Yes'
if str(cno_oil).lower() == 'yes':
sheet.range("B52").value = 'Yes'
else:
sheet.range("B52").value = 'No'
print('twist setup complete')
#%% Pencil Winder setup
if cno_putup == 'pwt':
sheet.range("D20").value = 'Yes'
else:
sheet.range("D20").value = 'No'
print('pencil winder setup complete')
#%%conditioning setup
if str(cno_cond).lower() == 'cd':
sheet.range("D44").value = 'Yes'
sheet.range("E47").value = 2
else:
sheet.range("D44").value = 'No'
print('condition setup complete')
#%% shipping/ receiving setup
sheet.range("G37").value = 'Yes'
print('shipping setup complete')
#%%Pyn /CT1 removal, Macro run
if str(abbrev).lower() in purchased_list:
sheet.range("A9").value = "No"
sheet.range("D9").value = "No"
sheet.range("G9").value = "No"
print('running summary macro')
summary_macro()
print('--------||---------------||---------'*4)
#%%
except:
print(item, 'not_found')
ls_notfound.append(item)
#%%
end_time = time.time()
print('numbers not found:')
print(ls_notfound)
print('script runtime', (end_time - start_time)/60, 'minutes')
| UTF-8 | Python | false | false | 9,783 | py | 8 | hs_allstds.py | 7 | 0.507411 | 0.481958 | 0 | 299 | 30.665552 | 146 |
LennartKeller/exmlc | 7,971,459,302,881 | e7ee6dd6e12d22a76332126b48719aa4cc2001bc | 949c48edba7a2fd114a5565ec5021901c6ba8811 | /exmlc/tag_embeddings/word_embeddings.py | 924c78613311ea2af4a0e93da8893e4f46f388bd | [
"Apache-2.0"
]
| permissive | https://github.com/LennartKeller/exmlc | 0b5882bae339351d15739c09c7023b15dde3872b | 912d51451a448b5d56bfe921bfdaaea82c7b7084 | refs/heads/master | 2022-04-01T08:22:45.995417 | 2020-02-18T14:13:00 | 2020-02-18T14:13:00 | 213,160,571 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import annotations
import numpy as np
from sklearn.base import BaseEstimator
from gensim.models import Word2Vec
from scipy.sparse import csr_matrix, lil_matrix
from typing import *
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
from gensim.models.fasttext import FastText
from gensim.models.word2vec import Word2Vec
from sklearn.exceptions import NotFittedError
import logging
from tqdm import tqdm
from gensim.similarities.docsim import WmdSimilarity
class Word2VecTagEmbeddingClassifier(BaseEstimator):
def __init__(self,
embedding_dim: int = 300,
min_count: int = 1,
window_size: int = 5,
epochs: int = 10,
                 model: str = 'word2vec',
distance_metric: str = 'cosine',
tfidf_weighting: bool = True,
pooling_func: callable = lambda x: np.mean(x, axis=0), # column wise average
n_jobs: int = 1,
verbose: bool = False
) -> None:
self.embedding_dim = embedding_dim
self.min_count = min_count
self.window_size = window_size
self.epochs = epochs
self.model = model
self.distance_metric = distance_metric
self.tfidf_weighting = tfidf_weighting
self.pooling_func = pooling_func
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X: np.array, y: csr_matrix):
        if self.verbose:
            logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
X_splitted = np.array([s.split() for s in X])
#docs = [TaggedDocument(words=tokens, tags=[index]) for index, tokens in enumerate(X_splitted)]
if self.model.lower() == 'fasttext':
self.wv_model_ = FastText(
sentences=X_splitted.tolist(),
size=self.embedding_dim,
iter=self.epochs,
min_count=self.min_count,
window=self.window_size,
workers=self.n_jobs
)
        elif self.model.lower() == 'word2vec':
self.wv_model_ = Word2Vec(
sentences=X_splitted.tolist(),
size=self.embedding_dim,
iter=self.epochs,
min_count=self.min_count,
window=self.window_size,
workers=self.n_jobs,
)
else:
raise NotImplementedError
tag_doc_mapping = self._create_tag_docs(y)
if self.tfidf_weighting:
self.tfidf_ = TfidfVectorizer()
self.texts_tfidf_ = self.tfidf_.fit_transform(X)
self.tag_embeddings_ = np.empty((y.shape[1], self.embedding_dim), dtype='float64')
if self.verbose:
tac_doc_iterator = tqdm(enumerate(tag_doc_mapping), desc='Computing tag embeddings')
else:
tac_doc_iterator = enumerate(tag_doc_mapping)
for tag_id, texts_idx in tac_doc_iterator:
# will be of shape(n_texts, embedding_dim)
tag_word_embeddings = []
for text_ind in texts_idx:
for token in list(set(X_splitted[text_ind])):
try:
word_embedding = self.wv_model_.wv[token]
except KeyError:
# if words occur that are ignored due to min_count
continue
if self.tfidf_weighting:
token_ind = self.tfidf_.vocabulary_.get(token, -1)
if token_ind > -1:
tfidf_value = self.texts_tfidf_[text_ind, token_ind]
word_embedding = word_embedding * tfidf_value
tag_word_embeddings.append(word_embedding)
self.tag_embeddings_[tag_id] = self.pooling_func(tag_word_embeddings)
return self
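    # Hypothetical usage sketch (variable names and data shapes are assumptions, not
    # part of this repo):
    #   clf = Word2VecTagEmbeddingClassifier(embedding_dim=100, epochs=5, model='word2vec')
    #   clf.fit(X_train, y_train)                  # X: array of str, y: csr_matrix (n_samples, n_labels)
    #   y_pred = clf.predict(X_test, n_labels=5)   # sparse binary matrix marking the 5 nearest tags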
def predict(self, X: List[str], n_labels: int = 10) -> np.array:
if not hasattr(self, 'tag_embeddings_'):
raise NotFittedError
X_splitted = [s.split() for s in X]
X_embeddings = []
for text in X_splitted:
text_word_embeddings = []
for token in text:
try:
word_embedding = self.wv_model_.wv[token]
except KeyError:
continue
text_word_embeddings.append(word_embedding)
X_embeddings.append(self.pooling_func(text_word_embeddings))
nn = NearestNeighbors(metric=self.distance_metric, n_neighbors=n_labels, n_jobs=self.n_jobs)
nn.fit(self.tag_embeddings_)
y_pred = lil_matrix((len(X), self.tag_embeddings_.shape[0]), dtype='int8')
for sample_ind, text_embedding in enumerate(X_embeddings):
nearest_neighbors = nn.kneighbors([text_embedding])[1][0]
y_pred[sample_ind, nearest_neighbors] = 1
return y_pred.tocsr()
def decision_function(self, X: List[str], n_labels: int = 10):
if not hasattr(self, 'tag_embeddings_'):
raise NotFittedError
X_splitted = [s.split() for s in X]
X_embeddings = []
for text in X_splitted:
text_word_embeddings = []
for token in text:
try:
word_embedding = self.wv_model_.wv[token]
except KeyError:
continue
text_word_embeddings.append(word_embedding)
X_embeddings.append(self.pooling_func(text_word_embeddings))
nn = NearestNeighbors(metric=self.distance_metric, n_neighbors=n_labels, n_jobs=self.n_jobs)
nn.fit(self.tag_embeddings_)
y_pred = lil_matrix((len(X), self.tag_embeddings_.shape[0]), dtype='float')
for sample_ind, sample_vec in enumerate(X_embeddings):
distances, indices = nn.kneighbors([sample_vec])
for distance, label_index in zip(distances, indices):
y_pred[sample_ind, label_index] = distance
return y_pred.tocsr()
def log_decision_function(self, X: Iterable[str], n_labels: int = 10):
if not hasattr(self, 'tag_embeddings_'):
raise NotFittedError
# TODO Uncomment this if sure that nothing will break
distances = self.decision_function(X=X, n_labels=n_labels)
log_distances = self._get_log_distances(distances)
return log_distances
def _get_log_distances(self, y_distances: csr_matrix, base=0.5) -> csr_matrix:
"""
        Returns the logarithmic version (base default: 0.5) of the distance matrix returned by decision_function.
        This must be used in order to compute valid precision@k scores
        since small distances should be ranked better than large ones.
        :param y_distances: sparse distance matrix (multilabel matrix with distances instead of binary indicators)
        :param base: base of the log function (must be smaller than one)
:return: sparse matrix with the log values
"""
log_y_distances = y_distances.tocoo()
log_y_distances.data = np.log(log_y_distances.data) / np.log(base)
return log_y_distances.tocsr()
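    # Illustrative check: with the default base 0.5,
    #   np.log(0.25) / np.log(0.5) == 2.0 and np.log(0.5) / np.log(0.5) == 1.0,
    # so smaller distances map to larger scores, which is what a
    # precision@k ranking expects.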
def _create_tag_docs(self, y: csr_matrix) -> np.ndarray:
"""
        Creates a mapping of each tag to its associated texts.
:param y: sparse label matrix
:return: array of shape (n_labels,) containing the indices of each text connected to a label
"""
self.classes_ = y.shape[1]
if self.verbose:
print('Sorting tag and docs')
iterator = tqdm(y.T)
else:
iterator = y.T
tag_doc_idx = list()
for tag_vec in iterator:
            pos_samples = tag_vec.nonzero()[1]  # get column indices of positive samples
tag_doc_idx.append(pos_samples)
return np.asarray(tag_doc_idx)
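    # Illustrative mapping: for y == [[1, 0], [1, 1]] (rows = docs, cols = tags),
    # _create_tag_docs returns [array([0, 1]), array([1])]: tag 0 occurs in
    # docs 0 and 1, tag 1 only in doc 1.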
def wmd(self, X_train, y_train, X_test, n_labels: int = 10, n_ev: int = 2):
"""
        Compute doc similarity scores using the Word Mover's Distance (Kusner et al., 2015).
        Since this is computationally expensive, because every doc from the test set has to be compared
        to each doc in the train set, the centroid optimization described by Kusner et al. is used.
        :param X_train: training texts
        :param y_train: sparse label matrix for X_train
        :param X_test: test texts
        :param n_labels: number of desired labels to predict
        :param n_ev: factor for the size of the search space;
            the precomputed search space for wmd will be of size n_labels * n_ev
        :return: sparse binary prediction matrix of shape (X_test.shape[0], y_train.shape[1])
"""
# Compute and store mean doc embedding for each doc in train-set
# => is done while fitting so we can use the self.tag_embeddings_ attribute
# For each sample in X_test
X_embeddings = [] # store mean doc embeddings for X_test
for x_sample in X_test:
x_sample = x_sample.split()
# Compute mean doc embedding for test doc
x_embeddings = []
for token in x_sample:
try:
word_embedding = self.wv_model_.wv[token]
except KeyError:
continue
x_embeddings.append(word_embedding)
X_embeddings.append(np.mean(x_embeddings, axis=0))
nn = NearestNeighbors(n_neighbors=n_labels * n_ev).fit(self.tag_embeddings_)
        X_nearest_tags = nn.kneighbors(X_embeddings)[1]  # indices of most similar tag docs
# recompute tag docs
tag_doc_idx = self._create_tag_docs(y_train)
tag_docs = [[] for _ in range(len(tag_doc_idx))]
for doc_idx, entry in zip(tag_doc_idx, tag_docs):
for doc_id in doc_idx:
entry.extend(X_train[doc_id].split())
tag_docs = np.array(tag_docs)
results = []
y_pred = lil_matrix((X_test.shape[0], y_train.shape[1]), dtype='int8')
if self.verbose:
            iterator = tqdm(enumerate(zip(X_nearest_tags, X_test)), desc='Computing wmd distances')
else:
iterator = enumerate(zip(X_nearest_tags, X_test))
for sample_ind, (nearest_tag_doc_idx, x_sample) in iterator: # TODO fix typo in loop var
wmd = WmdSimilarity(tag_docs[nearest_tag_doc_idx], self.wv_model_, num_best=n_labels)
sim_mat = wmd[x_sample.split()]
results.append(nearest_tag_doc_idx[[i[0] for i in sim_mat]])
y_pred[sample_ind, nearest_tag_doc_idx[[i[0] for i in sim_mat]]] = 1
return y_pred
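    # Illustrative sizing (numbers assumed): with n_labels=10 and n_ev=2 the
    # expensive WMD comparison above is run against only 20 centroid-nearest
    # tag documents per test sample instead of all y_train.shape[1] tag docs.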
if __name__ == '__main__':
X = np.array([
'Das ist ein Auto',
'Das ist ein Kino',
'Das ist ein Buch',
'Das ist ein Zug',
'Das ist ein Flugzeug',
'Das ist ein Computer'
])
y = [
[1, 2],
[1, 4],
[4, 5, 6],
[1, 2, 3],
[2, 5, 1],
[9, 7]
]
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
mb = MultiLabelBinarizer(sparse_output=True)
y_train = mb.fit_transform(y_train)
y_test = mb.transform(y_test)
import pandas as pd
from exmlc.preprocessing import clean_string
from exmlc.metrics import sparse_average_precision_at_k
df = pd.read_csv('~/ba_arbeit/BA_Code/data/Stiwa/df_5.csv').dropna(subset=['keywords', 'text'])
df, df_remove = train_test_split(df, test_size=0.99, random_state=42)
df.keywords = df.keywords.apply(lambda x: x.split('|'))
df.text = df.text.apply(lambda x: clean_string(x, drop_stopwords=True))
df_train, df_test = train_test_split(df, test_size=0.2, random_state=42)
X_train = df_train.text.to_numpy()
X_test = df_test.text.to_numpy()
mlb = MultiLabelBinarizer(sparse_output=True)
y_train = mlb.fit_transform(df_train.keywords)
y_test = mlb.transform(df_test.keywords)
clf = Word2VecTagEmbeddingClassifier(embedding_dim=300,
min_count=5,
model='doc2vec',
epochs=20,
window_size=5,
tfidf_weighting=False,
verbose=True,
n_jobs=4)
clf.fit(X_train, y_train)
#y_scores = clf.log_decision_function(X_test, n_labels=10)
y_pred = clf.wmd(X_train, y_train, X_test)
#print(sparse_average_precision_at_k(y_test, y_scores, k=3))
from sklearn.metrics import f1_score
print(f1_score(y_test, y_pred, average='macro'))
| UTF-8 | Python | false | false | 12,808 | py | 24 | word_embeddings.py | 23 | 0.575109 | 0.567458 | 0 | 324 | 38.530864 | 114 |
microsoft/Recognizers-Text | 16,664,473,131,387 | cb6fda1d47d8a8f6527da99ee15f62c227b6d93a | af02c1c94e1447adb32b35a9733a609294b286b8 | /Python/libraries/recognizers-date-time/recognizers_date_time/date_time/german/time_extractor_config.py | 8f16dbb335988d8dcb41a3de22498342862dcf63 | [
"MIT"
]
| permissive | https://github.com/microsoft/Recognizers-Text | 1bb885d05ac5ea14abc6e07656220e03de9de8e5 | 8d2eeda04e4eeef00bb00b1ae886289c84a6b67f | refs/heads/master | 2023-09-01T15:09:42.815524 | 2023-07-31T20:42:44 | 2023-07-31T20:42:44 | 88,544,417 | 1,188 | 324 | MIT | false | 2023-09-14T14:27:21 | 2017-04-17T19:45:47 | 2023-09-14T02:59:02 | 2023-09-14T14:26:25 | 50,939 | 1,593 | 426 | 190 | C# | false | false | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List, Pattern
from recognizers_text.utilities import RegExpUtility
from ...resources.german_date_time import GermanDateTime
from ..base_time import TimeExtractorConfiguration
from ..utilities import DateTimeOptions
from ..base_timezone import BaseTimeZoneExtractor
from .timezone_extractor_config import GermanTimeZoneExtractorConfiguration
class GermanTimeExtractorConfiguration(TimeExtractorConfiguration):
@property
def time_zone_extractor(self):
return self._time_zone_extractor
@property
def time_regex_list(self) -> List[Pattern]:
return self._time_regex_list
@property
def at_regex(self) -> Pattern:
return self._at_regex
@property
def ish_regex(self) -> Pattern:
return self._ish_regex
@property
def time_before_after_regex(self) -> Pattern:
return self._time_before_after_regex
def __init__(self):
super().__init__()
self._time_regex_list: List[Pattern] = GermanTimeExtractorConfiguration.get_time_regex_list(
)
self._at_regex: Pattern = RegExpUtility.get_safe_reg_exp(
GermanDateTime.AtRegex)
self._ish_regex: Pattern = RegExpUtility.get_safe_reg_exp(
GermanDateTime.IshRegex)
self._time_before_after_regex: Pattern = RegExpUtility.get_safe_reg_exp(
GermanDateTime.TimeBeforeAfterRegex)
self._options = DateTimeOptions.NONE
self._time_zone_extractor = BaseTimeZoneExtractor(
GermanTimeZoneExtractorConfiguration())
@staticmethod
def get_time_regex_list() -> List[Pattern]:
return [
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex1),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex2),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex3),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex4),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex5),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex6),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex7),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex8),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex9),
RegExpUtility.get_safe_reg_exp(GermanDateTime.TimeRegex10),
RegExpUtility.get_safe_reg_exp(GermanDateTime.ConnectNumRegex)
]
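# Illustrative usage (BaseTimeExtractor is assumed to live in ..base_time,
# following the library's configuration/extractor pattern):
#   from ..base_time import BaseTimeExtractor
#   extractor = BaseTimeExtractor(GermanTimeExtractorConfiguration())
#   matches = extractor.extract('um sieben Uhr abends')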
| UTF-8 | Python | false | false | 2,525 | py | 1,927 | time_extractor_config.py | 1,013 | 0.69901 | 0.694653 | 0 | 63 | 39.079365 | 100 |
kannan2796/Academic-Projects | 17,910,013,636,390 | c805ff1a6b37a2fc19fbae81fc41f14d37f3de2b | b7ea3e54b911be16766eb1cd00c7af06b8d0f093 | /Credit card Defaulter/SVM - Final2.py | cd24cabbd6867da083a1a893aa6b71af28ec9cd0 | []
| no_license | https://github.com/kannan2796/Academic-Projects | 8ba72819ffb5b40af8014457732a8f5f3e5117ef | 6af3e7692152d30f96d9e569b67f8ffbd88e091d | refs/heads/master | 2021-05-15T20:54:56.626678 | 2018-02-02T04:30:29 | 2018-02-02T04:30:29 | 107,909,916 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # changing the directory
import os
print (os.getcwd()) # see where you are
os.chdir(r'C:\Users\iamka\Desktop\Quarter 5\Python\Final project')
# KANNAN
# CREDIT CARD DEFAULT PREDICTION
# MODEL : SVM (RFE FEATURE SELECTION VIA LOGISTIC REGRESSION)
# DATA : 12/04/2017 - PYTHON FINAL PRESENTATION
# Libraries
import pandas as pd
import numpy as np
import sklearn as sk
# Load Data File
df = pd.read_excel('default of credit card clients1.xlsx')
# Transformation of columns
#j='default payment next month'
#df[j] = df[j].astype(int)
#corr = {'asis':0, 'log':0, 'exp':0, 'sqrt':0, 'pow2':0}
#for i in df.columns.tolist():
# if df.dtypes[i] != 'object':
# corr['asis'] = abs(np.corrcoef(df[i], df[j])[1][0])
# if all((df[i]>=0)):
# corr['log'] = abs(np.corrcoef(np.log(df[i] + 0.00001), df[j])[1][0])
# corr['sqrt'] = abs(np.corrcoef(np.sqrt(df[i] + 0.00001), df[j])[1][0])
# else:
# corr['log'] = 0
# corr['sqrt'] = 0
# corr['exp'] = abs(np.corrcoef(np.exp(df[i]), df[j])[1][0])
# corr['pow2'] = abs(np.corrcoef(np.power(df[i],2), df[j])[1][0])
# if max(corr,key=corr.get) == 'asis':
# df[i]=df[i]
# print 'asis:',i
# elif max(corr,key=corr.get) == 'log':
# df[i]=abs(np.log(df[i]))
# print 'log',i
# elif max(corr,key=corr.get) == 'sqrt':
# df[i]=np.sqrt(df[i])
# print 'sqrt',i
# elif max(corr,key=corr.get) == 'exp':
# df[i]=abs(np.exp(df[i]))
# print 'exp',i
# else:
# print 'power',i
# df[i]=np.power(df[i],2)
#
#
#df1= pd.DataFrame(df)
#df1=df1.replace([np.inf, -np.inf], np.nan) # replace all infinite values with NA values
#df1=df1.dropna() # drop the row if any of the values is NA
#df=df1
# Feature Extraction with RFE
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
# load data
array = df.values
X = array[:,0:23]
Y = array[:,-1]
# feature extraction
model = LogisticRegression()
rfe = RFE(model, 7) # Selecting top 7 variables from df
fit = rfe.fit(X, Y)
print("Num Features: %d") % fit.n_features_
print("Selected Features: %s") % fit.support_
print("Feature Ranking: %s") % fit.ranking_
a=fit.ranking_
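# Illustrative (assumes df's column order is unchanged): the boolean support
# mask can be mapped back to column names instead of hard-coding indices, e.g.
#   selected_columns = df.columns[:23][fit.support_]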
# Dropping columns based on variable selection
df.drop(df.columns[[0,2,4,8,9,10,11,12,13,14,15,16,19,20,21,22]], axis=1, inplace=True)
# Splitting the predictor and target variable
x1 = df.values #returns a numpy array
X = x1[:,0:6]
Y=x1[:,-1]
# Scaling the filtered columns
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
rescaledX = scaler.fit_transform(X)
df1 = pd.DataFrame(rescaledX) # summarize transformed data
# Assigning Values to X & Y after data transformation
X =df1.iloc[:,0:6]
Y =df.iloc[:,-1]
# Splitting Data set into Train and Test
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y)
# Create SVM classification object
from sklearn import svm
model = svm.SVC(kernel='rbf', C=100, gamma=10) # tune model by using kernel='linear', C=1,10, gamma=0.2,10,100,1000)
model.fit(X_train, Y_train) #Model created using train data
model.score(X_test, Y_test) #Model Accuracy
#Predict Output
predicted= model.predict(X_test)
# Confusion Matrix, Accuracy & Classification Report
from sklearn.metrics import confusion_matrix, accuracy_score,classification_report
cnf_matrix = confusion_matrix(Y_test,predicted) # Confusion matrix to calculate Precision, Recall
print(classification_report(Y_test,predicted))
print(accuracy_score(Y_test,predicted))
# Performing Cross Validation
from sklearn.model_selection import cross_val_score
cross= cross_val_score(model,X,Y,cv=5) # for performing Cross validation # Runs for at least 5min | UTF-8 | Python | false | false | 4,153 | py | 16 | SVM - Final2.py | 14 | 0.607513 | 0.579099 | 0 | 122 | 32.057377 | 116 |
adamlehenbauer/cliptwit | 14,714,557,984,293 | 72c278f8cc33869a6b4203083c8ad4c673683ed8 | d89403011736904818e7f1826a3626b0465b7cee | /clip.py | 70fdefa8b5c63b3484078c51e58e47c740321616 | []
| no_license | https://github.com/adamlehenbauer/cliptwit | 2c64779d387ee830648f5e1c4a717d3484136d65 | 6e5e1ca43be81a23064f623077cabf9b9a356921 | refs/heads/master | 2018-12-29T08:23:32.673470 | 2011-03-03T20:05:38 | 2011-03-03T20:05:38 | 1,436,393 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
import sys
import tweepy
from local_keys import *
def tweet(message):
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
result = api.update_status(message)
return result
if __name__ == '__main__':
f = open('/tmp/clip.log', 'a')
f.write("running clip\n")
    message = gtk.clipboard_get().wait_for_text()
    if message is None:
        # wait_for_text() returns None when the clipboard holds no text
        message = ""
    if len(message) > 140:
        message = message[0:140]
f.write("message to post is: %s\n" % message)
result = tweet(message)
f.write('result: ' + str(result) + "\n")
f.close()
| UTF-8 | Python | false | false | 688 | py | 3 | clip.py | 1 | 0.627907 | 0.614826 | 0 | 31 | 21.193548 | 61 |
BrunnerLivio/mk-deps | 18,159,121,741,909 | 50117994335d20e4aadfce73f9107e62150b8fef | 93edf7de74b31e64ac8b13ae463ccab406022e78 | /mkdeps/cli.py | f270046b05396fbef57c1d40e0cfd746567b557a | [
"Apache-2.0"
]
| permissive | https://github.com/BrunnerLivio/mk-deps | faf9006ea9777959b1d4b1eaaaf625961deb9c29 | c5a0ea59ae77265a80cff4a85617f619c6c42828 | refs/heads/master | 2021-09-15T23:58:30.081482 | 2018-06-13T10:19:56 | 2018-06-13T10:19:56 | 84,289,493 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
`mk-deps` - Cli tool for installing runtime dependencies of a debian package
:copyright: (c) by Livio Brunner
:license: See LICENSE for details
"""
import sys
import logging
import click
from .core import install_dependencies, print_version
from .exit_status import ExitStatus
@click.group()
@click.option("--version",
count=True,
default=False,
help="Show version and copyright information")
def cli(version):
"""
Install runtime dependencies of a debian package
"""
if version:
print_version()
@cli.command()
@click.option("--package",
help="Installs just the given package from the given control file")
@click.option("--dry-run",
count=True,
default=False,
help="Run the command without actually installing packages")
@click.argument("control_file")
def install(package, dry_run, control_file):
"""
    Install runtime dependencies of a debian package
"""
exit_status = ExitStatus.SUCCESS
print("\033[94mInstalling runtime dependencies..\033[0m")
exit_status = install_dependencies(control_file, package, dry_run)
print("\033[92mDone!\033[0m")
sys.exit(exit_status)
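# Illustrative CLI usage (the `mk-deps` entry-point name is assumed):
#   mk-deps install debian/control
#   mk-deps install --package some-package --dry-run debian/control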
| UTF-8 | Python | false | false | 1,240 | py | 14 | cli.py | 8 | 0.656452 | 0.641935 | 0 | 45 | 26.555556 | 81 |
0Azore/Coral-Screen-Lib | 7,842,610,332,090 | a3f81262c7a01fe4f5f3e220e8ba3c8f78215e00 | 75897dee7df71878e2b58ab7b4fc234d5fde2ea7 | /lib/python3.7/dist-packages/adafruit_blinka/board/coral_edge_tpu.py | 42e88962ccad9ed8140795db1eaabfedc5d2c705 | []
| no_license | https://github.com/0Azore/Coral-Screen-Lib | b7ed8612ab0d3ab4bfdda7548c9f266927699c30 | 667ded719b55d516b28b56cb839e098a5832147c | refs/heads/master | 2022-05-20T11:40:25.024807 | 2020-04-16T04:02:48 | 2020-04-16T04:02:48 | 256,094,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Pin definitions for the Coral Edge TPU Dev board."""
from adafruit_blinka.microcontroller.nxp_imx8m import pin
UART1_RXD = pin.UART1_RXD
UART1_TXD = pin.UART1_TXD
SDA = pin.I2C2_SDA
SCL = pin.I2C2_SCL
PWM1 = pin.PWM1
PWM2 = pin.PWM2
PWM3 = pin.PWM3
GPIO_P22 = pin.GPIO22
GPIO_P13 = pin.GPIO6
GPIO_P16 = pin.GPIO73
GPIO_P18 = pin.GPIO138
GPIO_P29 = pin.GPIO7
GPIO_P31 = pin.GPIO8
GPIO_P36 = pin.GPIO141
GPIO_P37 = pin.GPIO77
MISO = pin.ECSPI1_MISO
MOSI = pin.ECSPI1_MOSI
SCLK = pin.ECSPI1_SCLK
SCK = pin.ECSPI1_SCLK
SS0 = pin.ECSPI1_SS0
| UTF-8 | Python | false | false | 546 | py | 3 | coral_edge_tpu.py | 2 | 0.725275 | 0.628205 | 0 | 28 | 18.464286 | 57 |
DrFirestream/Tabaqui | 8,684,423,876,053 | 859de5526387de2b0eb00146d766243d45674b65 | 4ac11b3ac16e8ede6075b7da7008b6c63aab2788 | /script.py | 103b536a86570bdfa2139af92d74a32e9fbfda4c | [
"MIT"
]
| permissive | https://github.com/DrFirestream/Tabaqui | 59faa185db7fc030dcb9e8278441e3a9bf9413af | dad4fe2b3dcf01195bac330de509663ea2bd6e54 | refs/heads/master | 2022-12-24T15:20:27.472475 | 2020-09-19T21:22:22 | 2020-09-19T21:22:22 | 296,133,707 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from collections import defaultdict, deque
import random
import boto3
import botocore
import os
import json
import requests
aws_key = os.environ['AWS_ACCESS_KEY_ID']
aws_secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
telegram_token = os.environ['TELEGRAM_TOKEN']
server_url = os.environ['SERVER_URL']
users = json.loads(os.environ['CHAT_USERS'])
class GPTBot:
def __init__(self, max_len = 5):
self.updater = Updater(telegram_token, use_context = True)
self.messages = defaultdict(lambda: deque(maxlen=max_len))
dp = self.updater.dispatcher
dp.add_handler(CommandHandler('start',self.start))
dp.add_handler(CommandHandler('whatabout',self.whatabout))
dp.add_handler(MessageHandler(Filters.text & (~Filters.command),self.on_message))
def start(self, update, context):
context.bot.send_message(update.message.chat_id, 'Usage: just write something then write /whatabout')
def on_message(self, update, context):
if not update or not update.message or not update.message.chat_id or not update.message.text or not update.message.from_user:
return
msg = self.messages[update.message.chat_id]
userm = update.message.from_user
        # Telegram users may have no last_name (None), so guard the concatenation
        last_name = userm.last_name or ''
        user = users.get((userm.first_name + ' ' + last_name).strip(), None)
if user and update.message.text:
msg.append(user + ': "' + update.message.text + '"\n')
else:
print(userm)
def whatabout(self, update, context):
chat_id = update.message.chat_id
msgs = self.messages[chat_id]
ruser = random.choice(list(users.values()))
if not msgs:
text = '"Дом мой пуст"'
else:
cfg = botocore.config.Config(retries={'max_attempts': 0}, read_timeout=360, connect_timeout=360, region_name="eu-central-1" )
client = boto3.client('lambda', config=cfg, region_name='eu-central-1', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret_key)
payload={"Prompt": ''.join(msgs) + ruser + ': "', "Temperature": 0.9, "NQuotes": 1}
response = client.invoke(FunctionName = 'tabaqui_response', InvocationType = 'RequestResponse', LogType = 'Tail', Payload = json.dumps(payload))
dictj = json.loads(response['Payload'].read().decode())
text = dictj['Response']
text = [s for s in text.split('"') if len(s) > 0][:1]
        # fallback text: "Акела промахнулся" ("Akela has missed")
        context.bot.send_message(chat_id, text[0] if text and text[0] else 'Акела промахнулся')
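# Illustrative Lambda round trip (payload contents assumed): whatabout() sends
#   {"Prompt": 'Alice: "hi"\nBob: "', "Temperature": 0.9, "NQuotes": 1}
# to the tabaqui_response function, reads back JSON like
#   {"Response": 'hello there"'}
# and posts the first non-empty quoted segment to the chat.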
def main():
    # calling setWebHook without a url removes any existing webhook, so long polling can take over
    r = requests.get(url = 'https://api.telegram.org/bot%s/setWebHook'%(telegram_token))
bot = GPTBot()
bot.updater.start_polling()
bot.updater.idle()
    # restore the webhook once polling has stopped
    r = requests.get(url = 'https://api.telegram.org/bot%s/setWebHook?url=%s'%(telegram_token, server_url))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,986 | py | 4 | script.py | 2 | 0.635135 | 0.628716 | 0 | 63 | 44.984127 | 156 |
Kamik423/advent-of-code-2020 | 6,133,213,332,761 | 142df6d51595bfb4d28a8eee6708390624d9cff0 | 5dd7af5235148ca22df8ace91a820cc5117671f5 | /07_alternate.py | 185c5de6556791c33c420f94514fb60b6717de45 | []
| no_license | https://github.com/Kamik423/advent-of-code-2020 | fc5a6745049ead3923428119eb3bec23dc259769 | dd9335bf961f28d9746a016f4766d90791b478c8 | refs/heads/main | 2023-02-14T16:05:27.514571 | 2020-12-27T11:52:53 | 2020-12-27T11:52:53 | 317,509,326 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
from __future__ import annotations
from typing import Dict, List, Tuple
import aoc
import regex
from cached_property import cached_property
class Graph:
    rules: Dict[str, Rule] = {}  # class attribute: shared across all Graph instances
def __init__(self, description: str):
for line in description.split("\n"):
rule = Rule(line, self)
self.rules[rule.color] = rule
class Rule:
children_specs: Dict[str, int]
@cached_property
    def children(self) -> List[Tuple[Rule, int]]:
return [
(self.graph.rules[color], count)
for color, count in self.children_specs.items()
]
@cached_property
def contains_shiny_gold(self) -> bool:
return any(
child.color == "shiny gold" or child.contains_shiny_gold
for child, _ in self.children
)
@cached_property
def contained_bags(self) -> int:
return sum(
[count * (child.contained_bags + 1) for child, count in self.children]
)
color: str
graph: Graph
def __init__(self, line: str, graph: Graph):
        self.graph = graph
match = regex.fullmatch(
r"^([a-z ]+?) bags contain (?:(no|\d+) ([a-z ]+?) bags?(?:\.$|, ))+",
line,
)
self.color = match[1]
self.children_specs = {}
for amount, color in zip(match.captures(2), match.captures(3)):
if amount not in ["no", "0"]:
self.children_specs[color] = int(amount)
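        # Illustrative parse (standard puzzle input format): the line
        #   "light red bags contain 1 bright white bag, 2 muted yellow bags."
        # yields color == "light red" and
        # children_specs == {"bright white": 1, "muted yellow": 2};
        # "faded blue bags contain no other bags." leaves children_specs empty.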
def main() -> None:
graph = Graph(aoc.get_str(7).strip())
print(len([rule for rule in graph.rules.values() if rule.contains_shiny_gold]))
print(graph.rules["shiny gold"].contained_bags)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,734 | py | 26 | 07_alternate.py | 24 | 0.564591 | 0.559977 | 0 | 66 | 25.272727 | 83 |
elchuzade/racing-game-ai-dqn | 16,535,624,131,006 | d2ae2abbf763af1de9720c9f16d19ba403b1ed16 | cb6adaeee38183fc06c4eb89c235ef1647fabff3 | /helpers/utils.py | 990abda70424e0485ea8d56479873840b0602d68 | []
| no_license | https://github.com/elchuzade/racing-game-ai-dqn | d8ff905af6064e852d92508590ffc538d2873067 | c0fcb6c0cb355fadd870ac642dfff7efadb356d3 | refs/heads/master | 2020-12-06T17:01:51.002830 | 2020-01-09T11:54:25 | 2020-01-09T11:54:25 | 232,513,130 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from constants import Constants
from helpers.core import *
import random
import pygame
import numpy as np
def map_cars_to_state(cars, my_car):
my_position = find_my_position(my_car)
lines = map_cars_to_lines(cars, my_car)
all_distances = find_all_distances(lines)
state = [my_position, all_distances[0], all_distances[1], all_distances[2]]
return state
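# Illustrative state: with my car on line 1 and find_closest_car yielding
# 3, 7 (empty line) and 0 for the three road lines, map_cars_to_state
# returns [1, 3, 7, 0]: my position first, then one distance per line.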
def find_all_distances(lines):
# Returns an array of 3 numbers representing distances to the nearest cars on each road line
all_distances = []
distance_0 = find_closest_car(lines, 0)
all_distances.append(distance_0)
distance_1 = find_closest_car(lines, 1)
all_distances.append(distance_1)
distance_2 = find_closest_car(lines, 2)
all_distances.append(distance_2)
return all_distances
def find_my_position(my_car):
# returns index of a line my car is at (0 or 1 or 2)
return int(my_car.x // Constants.LINE_WIDTH.value)
def map_cars_to_lines(cars, my_car):
    # lines will represent 3 arrays corresponding to 3 vertical road lines
    lines = [[0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0]]
for car in cars:
# 2 is a label for enemy cars
coord_x = car.x // Constants.LINE_WIDTH.value
coord_y = car.y // Constants.LINE_WIDTH.value
lines[int(coord_x)][int(coord_y)] = 2
# 1 is a label for my car
my_coord_x = my_car.x // Constants.LINE_WIDTH.value
my_coord_y = my_car.y // Constants.LINE_WIDTH.value
lines[int(my_coord_x)][int(my_coord_y)] = 1
return lines
def check_if_lost(cars, my_car):
# For all cars on map check if x and y coordinates are equal to my_car's
for car in cars:
if car.x == my_car.x and car.y == my_car.y:
return True
return False
def find_closest_car(lines, index):
# Find and return the distance to the first car on a line given by index
count = -1
# Loops through each line and finds the number 2 which represents the enemy car
for i in reversed(lines[index]):
# When found an enemy car returns a distance from my car to enemy car
if i == 2:
return count
count += 1
    # If there are no cars on the line, returns the maximum distance (line length - 1)
return count
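# Illustrative trace: for lines[index] == [0, 0, 2, 0, 0, 0, 0, 0] the
# reversed scan starts at count = -1, increments once for each of the five
# empty cells below the enemy marker (2), and returns 4.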
def deactivate_cars(cars):
# deactivate_cars checks if a car is outside of map boundaries and deactivates it
for car in cars:
# If the enemy_car has reached the bottom of any road line, deactivate it
if car.y > Constants.HEIGHT.value - Constants.MARGIN.value - Constants.CAR_HEIGHT.value/2:
car.active = False
return cars
def move_cars(cars):
# move_cars calls move method of each car in cars state if the car is active
for car in cars:
        if car.active:
car.move()
return cars
def move_my_car(my_car, action):
if action == 0:
my_car.move("left")
elif action == 2:
my_car.move("right")
elif action == 1:
my_car.move("up")
return my_car
def perform_action(action, cars, my_car):
cars = move_cars(cars)
my_car = move_my_car(my_car, action)
cars = deactivate_cars(cars)
    # iterate over a copy: removing from the list while iterating it skips elements
    for car in cars[:]:
        if not car.active:
            cars.remove(car)
return cars, my_car
def add_new_car(cars):
# add_new_car will add a new enemy_car on the top level of one of the lines picked randomly
index = random.randint(0, 2)
# Y coordinate of new cars
y = Constants.MARGIN.value + Constants.CAR_HEIGHT.value/2
if index == 0:
# Center of first line
x = Constants.MARGIN.value + Constants.CAR_WIDTH.value/2
elif index == 1:
# Center of second line
x = Constants.MARGIN.value + Constants.CAR_WIDTH.value/2 + Constants.LINE_WIDTH.value
elif index == 2:
# Center of third line
x = Constants.MARGIN.value + Constants.CAR_WIDTH.value / \
2 + Constants.LINE_WIDTH.value*2
car = Enemy_car(x, y)
cars.append(car)
return cars
def initialize_screen():
# Size is the game screen size in pixels
SIZE = width, height = Constants.WIDTH.value, Constants.HEIGHT.value
SCREEN = pygame.display.set_mode(SIZE)
pygame.display.flip()
return SCREEN
def draw_cars(screen, cars):
# draw_cars will draw each car from cars array on screen using its icon
for car in cars:
screen.blit(
Constants.ENEMY_CAR_ICON.value, (car.x - car.width/2, car.y - car.height/2))
def draw_my_car(screen, my_car):
# draw_my_car will draw my car on screen using its icon
screen.blit(Constants.MY_CAR_ICON.value, (my_car.x - my_car.width /
2, my_car.y - my_car.height/2))
def draw_vertical_lines(screen):
# draw left vertical line to separate roads
pygame.draw.rect(screen, Constants.GREY.value,
(Constants.LINE_WIDTH.value - Constants.ROAD_LINE_WIDTH.value / 2, 0, Constants.ROAD_LINE_WIDTH.value, Constants.HEIGHT.value))
# draw right vertical line to separate roads
pygame.draw.rect(screen, Constants.GREY.value, (Constants.LINE_WIDTH.value *
2 - Constants.ROAD_LINE_WIDTH.value / 2, 0, Constants.ROAD_LINE_WIDTH.value, Constants.HEIGHT.value))
def ai_model(model, cars, my_car):
# AI will put state into model, predict the action and perform it
# Build input array for ai model
input_state = map_cars_to_state(cars, my_car)
# Pass input through model to find action
action = predict(input_state, model)
lost = check_if_lost(cars, my_car)
if lost:
print("Ops! Your model has crashed!")
# Perform the action suggested by ai model
perform_action(action, cars, my_car)
def predict(input_array, model):
# Puts input state into neural network and returns an action predicted by model
# Convert array into model input format
input_state = np.array([input_array])
# Pass input through model to get prediction
action = model.predict_classes(input_state)
return action
| UTF-8 | Python | false | false | 6,124 | py | 6 | utils.py | 4 | 0.644024 | 0.632103 | 0 | 185 | 32.102703 | 153 |
minghao2016/networksize | 13,486,197,333,027 | ec00469f1fa2380c9e1f26e8f3986e48bb843dca | 1d9723eb409dfaaec56d5dabcce35833142e08ef | /networksize/__init__.py | e668182846518894042074426718f5e389e09fd4 | []
| no_license | https://github.com/minghao2016/networksize | 67be4a60a55c5e0b4d57a5e60ff140b2bb44b2ba | 786dbee016e3c69ee5b977ac45467aff4221a2aa | refs/heads/master | 2021-04-30T18:05:40.477542 | 2015-08-27T19:59:29 | 2015-08-27T19:59:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from importer import *
from experiment import *
from crawler import *
from graphtools import *
from graphserver import *
| UTF-8 | Python | false | false | 121 | py | 23 | __init__.py | 19 | 0.793388 | 0.793388 | 0 | 5 | 23.2 | 25 |
igwangsung/nomadgram | 16,801,912,067,715 | 8a3b88840b2e60d0a1ab42ba64095c55e431f334 | ad4839bc68f3985f11296ed423cca6c319f6e02b | /nomad_coder/images/views.py | 5f13e1a4c6deff035303ca4271208785e1fdfb0b | [
"MIT"
]
| permissive | https://github.com/igwangsung/nomadgram | 5872820a1e699a5680a047ac294397d2e31a580b | ec902cc3820f8b9ff082dd782a56ebef4de8c2e6 | refs/heads/master | 2023-02-05T09:06:21.736680 | 2019-03-19T17:58:19 | 2019-03-19T17:58:19 | 153,406,125 | 0 | 0 | MIT | false | 2023-01-26T05:38:21 | 2018-10-17T06:26:28 | 2022-12-03T01:41:03 | 2023-01-26T05:38:17 | 5,840 | 1 | 0 | 29 | JavaScript | false | false | #from django.shortcuts import render #For using Template Concept of Django
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from . import models, serializers
from nomad_coder.notifications import views as notification_views
from nomad_coder.users import models as user_models
from nomad_coder.users import serializers as user_serializers
class ListAllImages(APIView):
    def get(self, request, format=None):  # format=None means JSON is returned by default
all_images = models.Image.objects.all()
serializer = serializers.ImageSerializer(all_images, many=True, context={'request':request}) #serializer is Class
        return Response(data=serializer.data)  # don't forget to pass data= here!!
class ListAllComments(APIView):
def get(self, request, format=None):
all_comments = models.Comment.objects.all()
serializer = serializers.CommentSerializer(all_comments, many=True)
return Response(data=serializer.data)
class ListAllLikes(APIView):
def get(self, request, format=None):
all_likes = models.Like.objects.all()
serializer = serializers.LikeSerializer(all_likes, many=True)
return Response(data=serializer.data)
class Images(APIView):
def get(self, request, format=None):
user = request.user
following_users = user.following.all()
image_list = []
for following_user in following_users:
user_images = following_user.images.all()[:2]
for image in user_images:
image_list.append(image)
my_images = user.images.all()[:2]
for image in my_images:
image_list.append(image)
#There are some bugs bc of created_at, updated_at Field
#InLine Function --> lambda
sorted_list = sorted(image_list,key=lambda image: image.created_at, reverse=True)
        # pass context so the serializer gets the request info available in the view
serializer = serializers.ImageSerializer(sorted_list, many=True, context={'request':request})
return Response(data=serializer.data)
def post(self, request, format=None):
user = request.user
serializer = serializers.InputImageSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ImageDetail(APIView):
def find_own_image(self, image_id, user):
try:
image = models.Image.objects.get(id=image_id, creator=user)
return image
except models.Image.DoesNotExist:
return None
def get(self, request, image_id, format=None):
user = request.user
try:
image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
        # a single object, so 'many=True' is not needed
serializer = serializers.ImageSerializer(image, context={'request':request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, image_id, format=None):
user = request.user
image = self.find_own_image(image_id, user)
if image is None:
return Response(status=status.HTTP_401_UNAUTHORIZED)
        # use the partial flag when doing a partial update
serializer = serializers.InputImageSerializer(image, data=request.data, partial=True)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_204_NO_CONTENT)
else:
            return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, image_id, format=True):
user = request.user
image = self.find_own_image(image_id, user)
if image is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
image.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class LikeImage(APIView):
#updated_at, created_at is datetime.datetime(), tzinfo=<UTC>
    # the data inside a queryset can be extracted and inspected!!
def get(self, request, image_id, format=None):
likes = models.Like.objects.filter(image__id=image_id)
        like_creator_ids = likes.values('creator_id')  # the ORM exposes the creator_id FK column automatically...?
users = user_models.User.objects.filter(id__in=like_creator_ids)
        # what does context={"request":request} mean here?
serializer = user_serializers.ListUserSerializer(users, many=True, context={"request":request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, image_id, format=None):
user = request.user
#create notification for like
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
preexisting_like = models.Like.objects.get(
creator=user,
image=found_image
)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except models.Like.DoesNotExist:
new_like = models.Like.objects.create(
creator=user,
image=found_image
)
new_like.save()
notification_views.create_notification(creator=user, to=found_image.creator, type='like', image=found_image)
return Response(status=status.HTTP_201_CREATED)
# class UnLikeImage(APIView):
# def delete(self, request, image_id, format=None):
# user = request.user
# try:
# found_image = models.Image.objects.get(id=image_id)
# except models.Image.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# try:
# preexisting_like = models.Like.objects.get(
# creator=user,
# image=found_image
# )
# preexisting_like.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
# except models.Like.DoesNotExist:
# return Response(status=status.HTTP_304_NOT_MODIFIED)
class UnLikeImage(APIView):
def delete(self, request, image_id, format=None):
user = request.user
try:
preexisiting_like = models.Like.objects.get(
creator=user,
image__id=image_id
)
preexisiting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.Like.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class CommentOnImage(APIView):
def post(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user, image=found_image)
notification_views.create_notification(creator=user, to=found_image.creator, type='comment', image=found_image, comment=serializer.data["message"])
            return Response(data=serializer.data, status=status.HTTP_201_CREATED)  # the Response shown in the Django REST browsable API
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Comment(APIView):
def delete(self, request, comment_id, format=None):
user = request.user
#create notification for Comment
try:
comment = models.Comment.objects.get(id=comment_id, creator=user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.Comment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
    # study timezone.now()
class ModerateComment(APIView):
def delete(self, request, image_id, comment_id, format=None):
user = request.user
#Comment Model has the Image Attribute
try:
comment_to_delete = models.Comment.objects.get(id=comment_id, image__id=image_id, image__creator=user)
comment_to_delete.delete()
except models.Comment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
def get(self, request, format=None):
hashtags = request.query_params.get('hashtags', None)
if hashtags is not None:
hashtags = hashtags.split(',')
images = models.Image.objects.filter(tags__name__in=hashtags).distinct()
serializer = serializers.CountImageSerializer(images, many=True, context={'request':request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
            images = models.Image.objects.all()[:20]  # so the full URLs can be seen!!
serializer = serializers.CountImageSerializer(images, many=True, context={'request':request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
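# Illustrative request (URL route assumed): GET /images/search/?hashtags=cat,dog
# returns images tagged with either hashtag; without the parameter the view
# falls back to the first 20 images.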
| UTF-8 | Python | false | false | 9,784 | py | 12 | views.py | 7 | 0.632066 | 0.623234 | 0 | 311 | 29.913183 | 159 |
Captricity/cappa | 3,994,319,601,085 | bfe9a55885c4bcd5962e6b006f83a951aa820ab6 | 94cd09a01b3f5ae57bc70802f3319b08fdf8a999 | /setup.py | 1d0f27d168011f317f2724bfc8442b0ece913da0 | [
"MIT"
]
| permissive | https://github.com/Captricity/cappa | 922243179939e886efad58e05f42fb265e080c92 | 6fa541e145c7fac7c499a63f7625be0dc8d3ea5d | refs/heads/master | 2022-11-12T23:21:23.177751 | 2020-11-18T19:16:24 | 2020-11-18T19:16:24 | 28,253,428 | 3 | 1 | MIT | false | 2022-10-28T17:37:02 | 2014-12-20T01:51:48 | 2020-11-18T19:16:28 | 2022-10-28T17:36:48 | 95 | 2 | 1 | 8 | Python | false | false | from setuptools import setup, find_packages
setup(
name="cappa",
version="0.19.0",
description="Package installer for Captricity. Supports apt-get, pip, bower, npm, and yarn.",
author="Yoriyasu Yano",
author_email="yorinasub17@gmail.com",
packages=find_packages(),
scripts=["scripts/cappa"],
install_requires=open("requirements.txt").read().split(),
    tests_require=open("test_requirements.txt").read().split(),
test_suite='tests'
)
| UTF-8 | Python | false | false | 474 | py | 57 | setup.py | 35 | 0.681435 | 0.668776 | 0 | 15 | 30.6 | 97 |
diaojingwen/pytest1 | 16,716,012,758,875 | 4d03cf7940982c46c0703451671391d9fe1a650a | 7c3fd970bed4d234bf1b990b1e93296c9c321717 | /hello.py | 0c564def238e2b5f72a7830527df78c8f25894b3 | []
| no_license | https://github.com/diaojingwen/pytest1 | ac0b76f2ae11c37714457ed347416833d99efd66 | b2f478b3b1a1633d09137373885ebed32e78c84e | refs/heads/master | 2023-01-19T14:42:45.468574 | 2020-11-26T08:09:00 | 2020-11-26T08:09:00 | 316,162,567 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import allure
import pytest
def inc(x):
return x + 1
def test_answer():
assert inc(3) == 4
class Testa:
def test_demo_1(self):
assert inc(1) == 3
def test_demo_2(self):
assert not inc(2) != 4
    @pytest.mark.parametrize("data,expect", [
(1,2),
(3,4),
        (10, 11),
(14,17)
])
def test_data(self,data,expect):
allure.attach("this is test",attachment_type=allure.attachment_type.TEXT)
allure.attach('<img src="https://img-blog.csdnimg.cn/2020102316024217.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQzNzYyMTkx,size_16,color_FFFFFF,t_70#pic_center" alt="在这里插入图片描述">',
attachment_type=allure.attachment_type.HTML)
assert inc(data) == expect
def teardown(self):
print("teardown") | UTF-8 | Python | false | false | 908 | py | 4 | hello.py | 1 | 0.614607 | 0.55618 | 0 | 33 | 26 | 253 |
eLOPy/Asteroids | 1,795,296,348,209 | 81a386485e1be3e99aabf733f999fdd2d59c38b7 | c19020cb7abac2ff32a07f2844bd9ec1061914b1 | /Game.py | 460e97e6dbfec0bd90ca4c6eefa428f17b31d2d3 | []
| no_license | https://github.com/eLOPy/Asteroids | 00b7fa9c367f0f37c6fa244e3373bc6cda56443e | 68f05e96d0a053ac9f6fd06e691a6512b79f44cf | refs/heads/master | 2018-01-07T11:44:36.180266 | 2015-08-14T01:24:54 | 2015-08-14T01:24:54 | 40,689,409 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'Muneer'
import Consts, pygame, random, GUI, Sound
from game_objects.Asteroid import Asteroid
from game_objects.Ship import Ship
from game_objects.Item import Item
from game_objects.Bullet import Bullet
from game_objects.GameObjectManager import GameObjectManager
class Game():
def __init__(self):
self.score = 0
self.level = 1
# make the gui
self.gui = GUI.GUI()
# make the game object manager
self.game_object_manager = GameObjectManager()
# create the bullets
temp_bullets = []
for index in range(Consts.MAX_BULLETS):
temp_bullets.append(Bullet())
self.game_object_manager.add_game_object(temp_bullets[index])
# make a player
self.player = Ship(Consts.CENTER, temp_bullets)
self.game_object_manager.add_game_object(self.player)
# spawn the asteroids
self.spawn_asteroids()
self.add_sounds()
Sound.sound_manager.play_sound(Sound.sounds.begin)
def reinit(self, deltaT):
self.level = 1
self.score = 0
self.clear_everything_except_player()
self.spawn_asteroids()
self.player.reinit()
self.game_object_manager.update(deltaT)
def draw(self, screen):
# clear the screen
Consts.screen.fill(Consts.BLACK)
# draw the game objects
self.game_object_manager.draw(screen)
# draw the ui
self.draw_ui(screen)
# flip the buffer
pygame.display.flip()
def update(self, deltaT):
self.game_object_manager.update(deltaT)
self.game_object_manager.collision_detection()
self.game_logic(deltaT)
self.split_hurt_asteroids()
self.check_for_homing()
def game_logic(self, deltaT):
# if the player has died finish the game and restart
if self.player.lives <= 0:
self.show_end_screen()
self.reinit(deltaT)
# if there are no more asteroids advance the level
if len(self.get_asteroids_remaining()) == 0:
self.advance_level()
def show_end_screen(self):
self.gui.draw_text("You lost!",Consts.CENTER, Consts.screen)
self.gui.draw_text("Score: " + str(self.score),(Consts.CENTER[0], Consts.CENTER[1]+50), Consts.screen)
pygame.display.flip()
pygame.time.delay(1500)
def advance_level(self):
self.level += 1
self.spawn_asteroids()
self.player.reset_bullets()
Sound.sound_manager.play_sound(Sound.sounds.begin)
def spawn_asteroids(self):
for index in range(0, self.level):
self.game_object_manager.add_game_object(Asteroid(32, Consts.SCREENLENGTHS, self.player))
def clear_everything_except_player(self):
for game_object in self.game_object_manager.game_objects:
if not(isinstance(game_object,Ship) or isinstance(game_object, Bullet)):
game_object.needs_removing = True
def split_hurt_asteroids(self): # splits any asteroids that need splitting
for go in self.game_object_manager.game_objects:
if isinstance(go, Asteroid):
if go.isHurt:
self.increase_score(go.radius)
if go.radius > 4: # if its big enough split it
self.split_asteroid(go)
if go.radius == 32: # chance an item drop when asteroid is 32 big
self.chance_a_health_drop(go)
else: # else just kill it
go.needs_removing = True
def split_asteroid(self, asteroid):
for index in range(2):
self.game_object_manager.add_game_object(Asteroid(asteroid.radius/2,Consts.SCREENLENGTHS, self.player, asteroid.position))
self.game_object_manager.remove_game_object(asteroid)
def chance_a_health_drop(self, go):
if random.randint(1,2) == 1:
self.game_object_manager.add_game_object(Item(go.position))
def increase_score(self, amount):
self.score += amount
def get_asteroids_remaining(self): # Returns the amount of asteroids in the game
asteroids = []
for game_object in self.game_object_manager.game_objects:
if isinstance(game_object, Asteroid):
asteroids.append(game_object)
return asteroids
def check_for_homing(self):
small_count = 0
asteroids_in_game = self.get_asteroids_remaining()
for asteroid in asteroids_in_game:
if asteroid.radius == 4:
small_count += 1
if small_count == len(asteroids_in_game):
for asteroid in asteroids_in_game:
asteroid.is_homing = True
asteroid.target = self.player.position
if Sound.sound_manager.sounds[Sound.sounds.homing].get_num_channels() == 0:
Sound.sound_manager.play_sound(Sound.sounds.homing, 20)
def add_sounds(self):
Sound.sound_manager.add_sound(pygame.mixer.Sound("Sounds/start.wav"))
Sound.sound_manager.add_sound(pygame.mixer.Sound("Sounds/shoot.wav"))
Sound.sound_manager.add_sound(pygame.mixer.Sound("Sounds/asteroid_hurt.wav"))
Sound.sound_manager.add_sound(pygame.mixer.Sound("Sounds/item_pickup.wav"))
Sound.sound_manager.add_sound(pygame.mixer.Sound("Sounds/ship_thrusters2.wav"))
Sound.sound_manager.add_sound(pygame.mixer.Sound("Sounds/homing_alarm.wav"))
Sound.sound_manager.add_sound(pygame.mixer.Sound("Sounds/player_hurt.wav"))
def draw_ui(self,screen):
self.gui.draw_text("Level: " + str(self.level), (10,10), screen) #show the level top right
self.gui.draw_text(
"Asteroids Remaining: " + str(len(self.get_asteroids_remaining()))
, (10, 40), screen) #show remaining asteroids - top right
self.gui.draw_text("Score: " + str(self.score), (10, 70), screen)
self.gui.draw_text("Lives: " + str(self.player.lives), (10, 100), screen) #show players health - top left | UTF-8 | Python | false | false | 6,315 | py | 14 | Game.py | 14 | 0.599367 | 0.591132 | 0 | 152 | 40.552632 | 134 |
jirikuncar/renku-gateway | 7,602,092,116,520 | 7ffae633cc521565fbafc9388b6fe0ac8c3d829c | 7d41460728007bc32bb7e33b35268ec18cd821d1 | /app/config.py | 7b2baea71a9f8d66def44f0c1eb6363b4d34d6e3 | [
"Apache-2.0"
]
| permissive | https://github.com/jirikuncar/renku-gateway | 06f21c96638b08e9044c3889e7eec93896080be3 | 7e9882f48f7020d143d6ccdc8c7dfaa433fdebea | refs/heads/master | 2020-04-11T02:51:01.890298 | 2018-11-27T14:51:55 | 2018-11-28T22:11:16 | 161,458,507 | 0 | 0 | Apache-2.0 | true | 2018-12-12T08:45:04 | 2018-12-12T08:45:03 | 2018-11-28T22:11:19 | 2018-12-11T15:56:47 | 239 | 0 | 0 | 0 | null | false | null | # -*- coding: utf-8 -*-
#
# Copyright 2018 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Global settings."""
import json
import os
import re
import requests
import sys
from time import sleep
from logging import getLogger
from collections import OrderedDict
logger = getLogger(__name__)
config = dict()
config['HOST_NAME'] = os.environ.get('HOST_NAME', 'http://gateway.renku.build')
if 'GATEWAY_SECRET_KEY' not in os.environ and "pytest" not in sys.modules:
logger.critical('The environment variable GATEWAY_SECRET_KEY is not set. It is mandatory for securely signing session cookie.')
exit(2)
config['SECRET_KEY'] = os.environ.get('GATEWAY_SECRET_KEY')
# We need to specify that the cookie is valid for all .renku.build subdomains
if 'gateway.renku.build' in config['HOST_NAME']:
config['SESSION_COOKIE_DOMAIN'] = '.'.join([''] + config['HOST_NAME'].split('.')[1:])
else:
config['SESSION_COOKIE_DOMAIN'] = None
config['SESSION_COOKIE_HTTPONLY'] = True
config['SESSION_COOKIE_SECURE'] = config['HOST_NAME'].startswith('https')
config['ALLOW_ORIGIN'] = os.environ.get('GATEWAY_ALLOW_ORIGIN', "").split(',')
config['REDIS_HOST'] = os.environ.get('GATEWAY_REDIS_HOST', 'renku-gw-redis')
config['RENKU_ENDPOINT'] = os.environ.get('RENKU_ENDPOINT', 'http://renku.build')
config['GITLAB_URL'] = os.environ.get('GITLAB_URL', 'http://gitlab.renku.build')
config['GITLAB_PASS'] = os.environ.get('GITLAB_PASS', 'dummy-secret')
config['GITLAB_CLIENT_ID'] = os.environ.get('GITLAB_CLIENT_ID', 'renku-ui')
config['GITLAB_CLIENT_SECRET'] = os.environ.get('GITLAB_CLIENT_SECRET', 'no-secret-needed')
if 'GITLAB_CLIENT_SECRET' not in os.environ:
logger.warning('The environment variable GITLAB_CLIENT_SECRET is not set. It is mandatory for Gitlab login.')
config['JUPYTERHUB_URL'] = os.environ.get('JUPYTERHUB_URL', '{}/jupyterhub'.format(config['HOST_NAME']))
config['JUPYTERHUB_CLIENT_ID'] = os.environ.get('JUPYTERHUB_CLIENT_ID', 'gateway')
config['JUPYTERHUB_CLIENT_SECRET'] = os.environ.get('JUPYTERHUB_CLIENT_SECRET', 'dummy-secret')
if 'JUPYTERHUB_CLIENT_SECRET' not in os.environ:
logger.warning('The environment variable JUPYTERHUB_CLIENT_SECRET is not set. It is mandatory for JupyterHub login.')
config['OIDC_ISSUER'] = os.environ.get('KEYCLOAK_URL', 'http://keycloak.renku.build:8080') \
+ '/auth/realms/Renku'
config['OIDC_CLIENT_ID'] = os.environ.get('OIDC_CLIENT_ID', 'gateway')
config['OIDC_CLIENT_SECRET'] = os.environ.get('OIDC_CLIENT_SECRET', 'dummy-secret')
if 'OIDC_CLIENT_SECRET' not in os.environ:
logger.warning('The environment variable OIDC_CLIENT_SECRET is not set. It is mandatory for OpenId-Connect login.')
config['SERVICE_PREFIX'] = os.environ.get('GATEWAY_SERVICE_PREFIX', '/')
# Get the public key of the OIDC provider to verify access- and refresh_tokens
# TODO: The public key of the OIDC provider should go to the app context and be refreshed
# TODO: regularly or whenever the validation of a token fails and the public key has not been
# TODO: updated in a while.
config['GATEWAY_ENDPOINT_CONFIG_FILE'] = os.environ.get('GATEWAY_ENDPOINT_CONFIG_FILE', 'endpoints.json')
def load_config():
from . import app
app.config['GATEWAY_ENDPOINT_CONFIG'] = {}
try:
with open(app.config['GATEWAY_ENDPOINT_CONFIG_FILE']) as f:
c = json.load(f, object_pairs_hook=OrderedDict)
for k, v in c.items():
app.config['GATEWAY_ENDPOINT_CONFIG'][re.compile(r"{}(?P<remaining>.*)".format(k))] = v
except:
logger.error("Error reading endpoints config file", exc_info=True)
logger.debug(app.config['GATEWAY_ENDPOINT_CONFIG'])
if "pytest" in sys.modules:
okKey = True
else:
okKey = False
attempts = 0
while attempts < 20 and not okKey:
attempts += 1
try:
raw_key = requests.get(config['OIDC_ISSUER']).json()['public_key']
config['OIDC_PUBLIC_KEY'] = '-----BEGIN PUBLIC KEY-----\n{}\n-----END PUBLIC KEY-----'.format(raw_key)
okKey = True
logger.info('Obtained public key from Keycloak.')
except:
logger.info('Could not get public key from Keycloak, trying again...')
sleep(10)
if not okKey:
logger.info('Could not get public key from Keycloak, giving up.')
exit(1)
| UTF-8 | Python | false | false | 4,953 | py | 24 | config.py | 16 | 0.70291 | 0.698464 | 0 | 120 | 40.233333 | 131 |
adfinis-sygroup/freewvs-wrapper | 9,337,258,923,283 | 793c1e5f8ca4165465f02b3f097953ec39cc7c64 | 44097dfffb147f89260f71630789ba2485e42ba2 | /processResults.py | 89109febcc95d308eacc7908d04b9dfb34efe5e5 | [
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0"
]
| permissive | https://github.com/adfinis-sygroup/freewvs-wrapper | d19bc8b0a204cc02d42010da89d1651d6cb8a6fe | ef6f4a98a25923e1caf709886604d054d27908af | refs/heads/master | 2021-01-11T05:40:47.501350 | 2019-12-18T12:18:00 | 2019-12-18T12:18:00 | 71,562,253 | 2 | 2 | NOASSERTION | false | 2019-12-18T12:18:02 | 2016-10-21T12:14:39 | 2019-12-17T16:22:48 | 2019-12-18T12:18:01 | 9 | 1 | 1 | 0 | Python | false | false | # process freewvs results
#
# Parses the resulting xml into csv and sends it via email to subscribers.
# All settings are in settings.py module.
#
# Usage:
# ./freewvs -x path/to/hostings > vulnscan.xml && python processResults.py
#
# I recommand updating the freewvsdb/ dir or it won't work too long. You can
# pass the output from `svn up` to this script as the first arg and it will be
# included in the mail.
#
# Example:
# update="$(svn up $install_dir/freewvsdb)"
# python ./processResults.py "${update}"
#
# Please note this script is not trying to win a beauty contest, it was written
# specifically to deal with the freewvs results.
import csv
import time
import os
import sys
import re
import settings
from lxml import etree
def parse_script_path(path, split):
# make sure split keyword ends with a slash or the following split
# won't make sense
if not split.endswith('/'):
split += '/'
parts = path.partition(split)
return parts[2].split('/', 1)
def parse_result_xml(in_file, out_file):
"""Parse freewvs result xml into csv"""
with open(out_file, 'w') as fds:
header = (
'appname', 'version', 'hosting', 'path', 'safeversion', 'vulninfo')
writer = csv.writer(fds)
writer.writerow(header)
root = etree.parse(in_file)
for app in root.iter('app'):
# split directory into hosting name and relative path to script
# directory
hosting, path = parse_script_path(
app.find('directory').text,
settings.HOSTING_DIR
)
appname = app.find('appname').text
version = app.find('version').text
hosting = hosting
path = path
safeversion = app.find('safeversion').text
vulninfo = app.find('vulninfo').text
row = appname, version, hosting, path, safeversion, vulninfo
writer.writerow(row)
def email_results(recipients, attachments, text_addon):
"""Send an email with attachment(s)"""
from smtplib import SMTP
from email.mime.text import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Encoders
import socket
# scan results from <datetime> on <hostname>
text = (
"Web vulnerability scan results from %s on %s." % (
time.strftime('%d.%m.%Y %H:%M'),
socket.gethostname()
)
)
if text_addon:
text += '\n\n' + text_addon
text += '\n\nPowered by freewvs and Adfinis SyGroup AG.'
text += '\nhttps://wiki.adfinis-sygroup.ch/adsy/index.php/Freewvs'
# build message
msg = MIMEMultipart()
msg['Subject'] = 'Vulnerability scan results %s' % time.strftime('%Y%m%d')
msg['From'] = settings.SMTP_USER
msg['To'] = ', '.join(recipients)
msg.attach(MIMEText(text))
for fds in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(fds, 'rb').read())
Encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % os.path.basename(fds)
)
msg.attach(part)
# send mail
smtp = SMTP(settings.SMTP_HOST, settings.SMTP_PORT)
smtp.starttls()
if settings.SMTP_PASS:
smtp.login(settings.SMTP_USER, settings.SMTP_PASS)
smtp.sendmail(msg['From'], recipients, msg.as_string())
smtp.quit()
# get output from `svn up`
try:
update_output = sys.argv[1]
# get revision from update message
match = re.search('revision (\d+)?', update_output)
db_version = match.groups()[0]
# there was an update
if update_output.lower().find('updated') >= 0:
update_msg = 'freewvsdb was automatically updated to revision %s' % (
db_version
)
# no update
else:
update_msg = 'freewvsdb is up-to-date at revision %s' % db_version
except:
update_msg = 'Notice: freewvsdb updater is not working. ' \
'This should be fixed!'
# parse freewvs xml to csv
parse_result_xml(settings.IN_FILE, settings.OUT_FILE)
# notify subscribers and send csv as attachment
email_results(
recipients=settings.SUBSCRIBERS,
attachments=[settings.OUT_FILE],
text_addon=update_msg
)
# log some infos
print(
"\n[%s] Vulnerability scan complete. Sent email." %
time.strftime('%Y-%m-%d-%H:%M')
)
print("Purged generated file %s" % settings.OUT_FILE)
print(update_msg)
print('Output from `svn update freewvsdb`:')
print(update_output)
os.remove(settings.OUT_FILE)
| UTF-8 | Python | false | false | 4,618 | py | 5 | processResults.py | 3 | 0.631875 | 0.630359 | 0 | 153 | 29.183007 | 79 |
VivekSumanth/Code-Everyday | 515,396,080,174 | d7b4e1a5d7c2015062256595efc0213c63308d4d | 5cf0343353f3f408f165ba3fd7c1e313265b7723 | /june 20/19.py | a0ef850974a89ec35a0dcbaa080348419593478c | []
| no_license | https://github.com/VivekSumanth/Code-Everyday | 8722876da8e1db5c541eb7d7266fceaba94ebcd0 | f979abecd32491951d9c28c24aa9d28cad3792d7 | refs/heads/master | 2023-01-31T16:51:06.116364 | 2020-12-13T02:50:27 | 2020-12-13T02:50:27 | 264,581,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # date:12/06/20
# 19. Remove Nth Node From End of List
# Medium
# Given a linked list, remove the n-th node from the end of list and return its head.
# Example:
# Given linked list: 1->2->3->4->5, and n = 2.
# After removing the second node from the end, the linked list becomes 1->2->3->5.
# Note:
# Given n will always be valid.
# Follow up:
# Could you do this in one pass?
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
fast = head
temp = head
for i in range(n):
fast = fast.next
if fast == None:
return head.next
while(fast.next):
temp = temp.next
fast = fast.next
temp.next = temp.next.next
return head
# Runtime: 16 ms, faster than 93.94% of Python online submissions for Remove Nth Node From End of List.
# Memory Usage: 12.7 MB, less than 85.40% of Python online submissions for Remove Nth Node From End of List. | UTF-8 | Python | false | false | 1,293 | py | 140 | 19.py | 137 | 0.584687 | 0.554524 | 0 | 59 | 20.932203 | 108 |
DSchana/Final-Project-11 | 2,353,642,113,740 | f333ae12e14acb4ea8eeef06a7c0441988c22735 | 1d672d29dd7299947034a13e8a426a6b9bf4297d | /Compsci Final Project/attackSprites (old).py | 795c659b930a6fb414e05f1cb2167fbec641ee96 | []
| no_license | https://github.com/DSchana/Final-Project-11 | c2fc66b48fbd4fae165c25dfc058fd59faf0f3ff | 30cdf62443b1c152068ce11f5a913728248c66b5 | refs/heads/master | 2020-04-09T16:50:51.605326 | 2015-10-02T03:36:33 | 2015-10-02T03:36:33 | 30,880,912 | 0 | 2 | null | false | 2019-11-30T08:43:04 | 2015-02-16T18:04:03 | 2015-06-12T20:03:06 | 2015-10-02T03:36:33 | 125,239 | 0 | 2 | 1 | Python | false | false | #Cast Spell Sprites
#This code controls the animations for the player when he casts spells
#Load the sprites in the loading screen ******
from pygame import*
screen = display.set_mode((850,600))
#The following dict stores the sprites for casting spells (attacking),
#keyed by the same direction strings used by the "direction" variable below
castSpellSprites = {}
for d in ["right", "left", "up", "down", "upRight", "upLeft", "downRight", "downLeft"]:
    folder = "castSpell" + d[0].upper() + d[1:]
    castSpellSprites[d] = [image.load(folder + "\\" + folder + str(i) + ".png") for i in range(6)]
playerX, playerY = 400,300 #placeholder values
screen.fill((255,255,255))
direction = "upRight"
frame = 0
#I assume you have some variable that keeps track of the direction that the player is facing,
#in this file I just called it "direction"
running = True
while running:
mb = mouse.get_pressed()
mx,my = mouse.get_pos()
for e in event.get():
if e.type == QUIT:
running = False
    if key.get_pressed()[K_SPACE] and direction in castSpellSprites:
        screen.fill((255,255,255)) #Replace fill with subsurface
        screen.blit(castSpellSprites[direction][frame],(playerX,playerY))
        time.wait(20) #Find better way to delay animations
        frame += 1
        if frame>5:
            frame = 0
display.flip()
quit()
| UTF-8 | Python | false | false | 4,162 | py | 29 | attackSprites (old).py | 24 | 0.586257 | 0.552138 | 0 | 126 | 32.02381 | 111 |
afcarl/ogb_lite | 8,727,373,570,311 | 331d199aecade2cdd4e5c6974675c9eee9220e61 | 1bacbea76a84f1e31f41a100cfdc5ad7704437b0 | /ogb_lite/utils/__init__.py | 3c45b8dbeda4574e5c9885bce7c6bdaf0aa956f5 | [
"MIT"
]
| permissive | https://github.com/afcarl/ogb_lite | b04997b0e7d8a190512dda0610b68086b69dfa3e | e575f979c712eb572bfb34e38381a7948b33f77c | refs/heads/main | 2023-04-19T13:16:36.384206 | 2021-04-27T21:42:55 | 2021-04-27T21:42:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
try:
from .mol import smiles2graph
except ImportError:
pass | UTF-8 | Python | false | false | 82 | py | 15 | __init__.py | 13 | 0.719512 | 0.695122 | 0 | 5 | 15.6 | 33 |
hjicheng/python-pc | 16,432,544,898,933 | 4a3b94aec938efe1412de8513608c8a0f880c954 | d486ec26db4138bf9f27a00c1048ab90096af8ee | /Python爬虫/21DAY/20190331/dianying.py | 78a3b91134f1ed56a55855d19ad1ca2a3b4916ff | []
| no_license | https://github.com/hjicheng/python-pc | 2e7f733956db9f038b40a17cdd4d6c51badb5ac8 | a968d0f8956dc2e422f17aa30df627755da3e979 | refs/heads/master | 2020-04-30T09:38:26.640890 | 2019-05-19T14:25:55 | 2019-05-19T14:25:55 | 176,754,068 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
#!/usr/bin/python3
#_author_='HuangJiCheng';
#date: 2019/3/31
import requests
from lxml import etree
BASE_URL = 'https://www.dytt8.net'
# fetch the data one list page at a time
# collect each detail-page URL, open the detail page and parse it
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3608.4 Safari/537.36'
}
# parse a list (parent) page and return the URLs of its detail (child) pages
def get_detail_urls(url):
response = requests.get(url, headers=HEADERS)
    # if the page text comes back garbled, parse the raw response.content
    # using the encoding declared in the page source
html = etree.HTML(response.text)
detail_urls = html.xpath('//table[@class="tbspan"]//a/@href')
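    # each list page keeps its movie links in <table class="tbspan"> rows; the
    # XPath grabs every <a href> inside them, and BASE_URL is prefixed below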
detail_urls = map(lambda url:BASE_URL+url,detail_urls)
return detail_urls
# parse a detail page into a movie dict
def parse_detail_page(url):
movice = {}
response = requests.get(url,headers=HEADERS)
text = response.content.decode('gbk')
html =etree.HTML(text)
title = html.xpath('//div[@class="title_all"]//font[@color="#07519a"]/text()')
movice['title'] = title
zoom = html.xpath('//div[@id="Zoom"]')[0]
if len(zoom.xpath('.//img/@src')) == 2:
movice_cover = zoom.xpath('.//img/@src')[0]
movice_screenhot = zoom.xpath('.//img/@src')[1]
movice['movice_cover'] = movice_cover
movice['movice_screenhot'] = movice_screenhot
infos = zoom.xpath('.//text()')
def parse_info(info,rule):
return info.replace(rule, '').strip()
for index,info in enumerate(infos):
if info.startswith('◎年 代'):
movice['year'] = parse_info(info,'◎年 代')
elif info.startswith('◎产 地'):
movice['country'] = parse_info(info,'◎产 地')
elif info.startswith('◎类 别'):
movice['category'] = parse_info(info,'◎类 别')
elif info.startswith('◎语 言'):
movice['language'] = parse_info(info,'◎语 言')
elif info.startswith('◎上映日期'):
movice['start_time'] = parse_info(info,'◎上映日期')
elif info.startswith('◎豆瓣评分'):
movice['star'] = parse_info(info,'◎豆瓣评分')
elif info.startswith('◎片 长'):
movice['duration'] = parse_info(info,'◎片 长')
elif info.startswith('◎导 演'):
movice['direct'] = parse_info(info,'◎导 演')
elif info.startswith('◎主 演'):
info_actor = parse_info(info, '◎主 演')
actors = [info_actor]
for x in range(index+1,len(infos)):
actor = infos[x].strip()
if actor.startswith("◎"):
break
actors.append(actor)
movice['actors'] = actors
elif info.startswith('◎标 签'):
movice['tags'] = parse_info(info,'◎标 签')
elif info.startswith('◎简 介 '):
info_profile = parse_info(info,'◎简 介 ')
profiles = []
for x in range(index+1,len(infos)):
                profile = infos[x].strip()
                if profile.startswith("◎"):
                    break
                profiles.append(profile)
movice['profile'] = profiles
down_load = html.xpath('//td[@bgcolor="#fdfddf"]/a/@href')
movice['down_load'] = down_load
return movice
# crawl the list (parent) pages that need scraping
def spider():
movices = []
base_url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'
for i in range(1,8):
url = base_url.format(i)
        # fetch all the detail-page (child) URLs
movice_detail_url = get_detail_urls(url)
for i in movice_detail_url:
movice = parse_detail_page(i)
print(movice)
movices.append(movice)
if __name__ == '__main__':
spider() | UTF-8 | Python | false | false | 3,919 | py | 66 | dianying.py | 63 | 0.553688 | 0.539447 | 0 | 98 | 34.836735 | 127 |
JamesPino/pebl | 16,681,652,987,664 | 8b4f1e33da3372b857b42edfdc53b958ad1b0f71 | 85f4b8297056117a227e83c3b901c51ab1067c66 | /src/pebl/discretizer.py | 215b0e719571f14c604f27db5b94670415e9d505 | [
"MIT"
]
| permissive | https://github.com/JamesPino/pebl | 9a03a201d0e989619d3eaf2139f325272f6fb996 | f3677642e342e92133af0b8ebe038c6ea35ac30b | refs/heads/master | 2021-01-17T23:35:48.564297 | 2016-05-19T03:51:06 | 2016-05-19T03:51:06 | 59,163,795 | 0 | 0 | null | true | 2016-05-19T01:30:00 | 2016-05-19T01:29:59 | 2016-04-20T15:36:06 | 2011-11-16T01:19:34 | 2,127 | 0 | 0 | 0 | null | null | null | """ Collection of data discretization algorithms."""
import numpy as N
import data
from util import as_list
def maximum_entropy_discretize(indata, includevars=None, excludevars=[], numbins=3):
"""Performs a maximum-entropy discretization of data in-place.
Requirements for this implementation:
1. Try to make all bins equal sized (maximize the entropy)
2. If datum x==y in the original dataset, then disc(x)==disc(y)
For example, all datapoints with value 3.245 discretize to 1
even if it violates requirement 1.
3. Number of bins reflects only the non-missing data.
Example:
input: [3,7,4,4,4,5]
output: [0,1,0,0,0,1]
Note that all 4s discretize to 0, which makes bin sizes unequal.
Example:
input: [1,2,3,4,2,1,2,3,1,x,x,x]
output: [0,1,2,2,1,0,1,2,0,0,0,0]
Note that the missing data ('x') gets put in the bin with 0.0.
"""
# includevars can be an atom or list
includevars = as_list(includevars)
# determine the variables to discretize
includevars = includevars or range(indata.variables.size)
includevars = [v for v in includevars if v not in excludevars]
for v in includevars:
# "_nm" means "no missing"
vdata = indata.observations[:, v]
vmiss = indata.missing[:, v]
        vdata_nm = vdata[~vmiss]  # "~" inverts the mask, selecting non-missing entries
argsorted = vdata_nm.argsort()
if len(vdata_nm):
# Find bin edges (cutpoints) using no-missing
binsize = len(vdata_nm) // numbins
binedges = [vdata_nm[argsorted[binsize * b - 1]] for b in range(numbins)][1:]
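            # binedges: cutpoint values between bins; the b=0 entry (index -1,
            # i.e. the largest value) is sliced off by [1:], leaving
            # numbins-1 ascending cutpoints for searchsorted below.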
# Discretize full data. Missings get added to bin with 0.0.
indata.observations[:, v] = N.searchsorted(binedges, vdata)
oldvar = indata.variables[v]
newvar = data.DiscreteVariable(oldvar.name, numbins)
newvar.__dict__.update(oldvar.__dict__) # copy any other data attached to variable
newvar.arity = numbins
indata.variables[v] = newvar
# if discretized all variables, then cast observations to int
if len(includevars) == indata.variables.size:
indata.observations = indata.observations.astype(int)
return indata
| UTF-8 | Python | false | false | 2,276 | py | 8 | discretizer.py | 7 | 0.628295 | 0.605888 | 0 | 67 | 32.970149 | 91 |
FihlaTV/opennames | 3,685,081,973,936 | 4dd82a11603ee62fa154844a479ae88b5abe9bd7 | b80abcaed517f9ae728a7813fa4180441bf5f9f7 | /peplib/source.py | 83904be6d12fed9be4bfa1ef820d71f7293ad9be | [
"MIT"
]
| permissive | https://github.com/FihlaTV/opennames | 35e03011f92941fb32d4031bbc8714226361358d | baf10a04c6f700cfb722a3f98d021754536df67d | refs/heads/master | 2021-01-23T07:55:18.874363 | 2017-03-06T19:38:06 | 2017-03-06T19:38:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import dataset
import countrynames
from pprint import pprint # noqa
from peplib.util import clean_obj, unique_objs
from peplib.config import DATABASE_URI
from peplib.schema import validate
log = logging.getLogger(__name__)
db = dataset.connect(DATABASE_URI)
class Source(object):
def __init__(self, source_id):
self.source_id = source_id
self.log = logging.getLogger(source_id)
self.entity_count = 0
self.entity_table = source_id
self.identities_table = source_id + '_identities'
self.other_names_table = source_id + '_other_names'
self.addresses_table = source_id + '_addresses'
def clear(self):
if self.entity_table in db:
db[self.entity_table].delete()
if self.identities_table in db:
db[self.identities_table].delete()
if self.other_names_table in db:
db[self.other_names_table].delete()
if self.addresses_table in db:
db[self.addresses_table].delete()
def emit(self, data):
data['identities'] = unique_objs(data.get('identities', []))
data['other_names'] = unique_objs(data.get('other_names', []))
data['addresses'] = unique_objs(data.get('addresses', []))
data = clean_obj(data)
try:
validate(data)
except Exception as ex:
log.exception(ex)
return
uid = data.get('uid')
for identity in data.pop('identities', []):
identity['uid'] = uid
db[self.identities_table].insert(identity)
for other_name in data.pop('other_names', []):
other_name['uid'] = uid
db[self.other_names_table].insert(other_name)
for address in data.pop('addresses', []):
address['uid'] = uid
db[self.addresses_table].insert(address)
db[self.entity_table].insert(data)
self.entity_count += 1
def normalize_country(self, name):
return countrynames.to_code(name)
def save(self):
self.log.info("Parsed %s entities", self.entity_count)
| UTF-8 | Python | false | false | 2,110 | py | 24 | source.py | 9 | 0.601896 | 0.600948 | 0 | 67 | 30.492537 | 70 |
mattbasta/monacle | 9,474,697,878,845 | 1bae5b9d457c0104f37ae798e37761b534850037 | 87730f381714ea6a35b5d8f0384391f1655db40c | /services/places.py | f0ec651229c8373f360eedefba556a1b4e2bcee8 | []
| no_license | https://github.com/mattbasta/monacle | 8a59a5d26a496ba6487381069505831b31188f2a | da9aa7f5ae3d2733df5e7b85b02688a090ed3f6b | refs/heads/master | 2016-09-06T08:38:22.794935 | 2012-09-01T04:30:08 | 2012-09-01T04:30:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import constants
import math
from factual import Factual
fs = constants.FACTUAL_SETTINGS
f = Factual(fs["key"], fs["secret"])
places = f.table("global")
geographies = f.table("world-geographies")
class LatLon(object):
def __init__(self, lat, lon):
self.lat, self.lon = map(float, (lat, lon))
print "Lat Lon:", lat, lon
def tup(self):
return self.lat, self.lon
def render(self):
return {"lat": self.lat, "lon": self.lon}
def coords(self):
return self
class Place(object):
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.center = kwargs.get("center", None)
if self.center:
assert isinstance(self.center, LatLon)
self.locality = kwargs.get("locality", None)
self.region = kwargs.get("region", None)
self.country = kwargs.get("country", None)
def render(self):
out = {"name": self.name,
"locality": self.locality,
"region": self.region,
"country": self.country}
if self.center:
out.update({"coords": self.center.render()})
return out
def coords(self):
# This is really hacky, but it prevents failures and it doesn't give
# weird results because this should only be used for sorting.
return self.center or LatLon(0, 0)
class Venue(object):
def __init__(self, name, address=None, place=None, location=None,
metadata=None):
self.name = name
self.address = address
self.place = place
self.location = location
self.metadata = metadata
def render(self):
out = {"type": "place",
"name": self.name,
"address": self.address,
"metadata": self.metadata}
if self.place:
out.update({"place": self.place.render()})
if self.location:
out.update({"coords": self.location.render()})
return out
def coords(self):
return self.location or LatLon(0, 0)
nauticalMilePerLat = 60.00721
nauticalMilePerLongitude = 60.10793
rad = math.pi / 180.0
milesPerNauticalMile = 1.15078
def _dist(loc1, loc2):
"""
Caclulate distance between two lat lons in NM
"""
lat1, lon1 = loc1.tup()
lat2, lon2 = loc2.tup()
yDistance = (lat2 - lat1) * nauticalMilePerLat
xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * \
(lon2 - lon1) * (nauticalMilePerLongitude / 2)
distance = math.sqrt(yDistance**2 + xDistance**2)
return distance * milesPerNauticalMile
def get_location(query, request, near="here", full_object=False):
if isinstance(query, LatLon):
if full_object:
raise "Cannot return full object for structured data."
return query
elif isinstance(query, Venue):
return query.location if not full_object else query
elif isinstance(query, Place):
return query.center if not full_object else query
print "PLACES: Getting location for", query
if query in ("here", "me", "where i am", "where i'm at", ):
return LatLon(request.prop("latitude"),
request.prop("longitude"))
# Search for a place first.
place = get_place(query, request, near=near, secondary=True)
if place:
return place.center if not full_object else place
# If it's not a place, search for a venue.
venue = get_venue(query, request, near=near)
if venue:
return venue.location if not full_object else venue
return None
PLACE_LIMIT_MIN = 10
SEC_PLACE_THRESH = 100 # Miles
def get_place(query, request, near="here", limit=1, secondary=False):
"""
Return a place object corresponding to the given query.
`near`:
A place, venue, or LatLon object that describes where the place should
be located near.
`limit`:
The maximum number of results to return.
`secondary`:
If this is not the object that is being returned to the user (i.e.: it
is being used for an auxiliary task like a venue's `near` parameter),
this should be set to True.
"""
print "PLACES: Getting place for", query
near = get_location(near, request)
query = query.strip()
q = geographies.search(query).filters({"name": {"$search": query}})
q = q.limit(PLACE_LIMIT_MIN if limit <= PLACE_LIMIT_MIN else limit)
placetypes = ["locality", "postcode", "colloquial"]
if not secondary:
placetypes += ["state", "county", "neighborhood", "timezone"]
q = q.filters({"placetype": {"$in": placetypes}})
# if near:
# q = q.geo({"$point": list(near.tup())})
# q = q.sort("$distance:asc")
q = q.select("name,country,latitude,longitude,placetype")
print "DEBUG:", q.path, q.params
results = q.data()
print "PLACES: %s > %s" % (query, results)
if not results:
return None
def process(result):
return Place(name=result["name"],
country=result["country"],
center=LatLon(result["latitude"], result["longitude"]))
#return process(results[0]) if limit == 1 else map(process, results)
# We have to do this until Factual starts letting us properly geo-sort.
results = map(process, results)
exact_results = filter(lambda x: x.name.lower() == query.lower(),
results)
if exact_results:
results = exact_results
if near:
near_loc = near.coords()
_dd = lambda x: _dist(near_loc, x.coords())
results = sorted(results, cmp=lambda x, y: cmp(_dd(x), _dd(y)))
if secondary:
results = filter(lambda r: _dd(r) <= SEC_PLACE_THRESH, results)
if not results:
return None
return results[0] if limit == 1 else results
def get_venue(query, request, near="here", limit=1):
print "PLACES: Getting venue for", query
near = get_location(near, request)
q = places.search(query).limit(limit)
if near:
print "Near", near
q = q.geo({"$circle": {"$center": near.tup(),
"$meters": 50000}})
q = q.sort("$distance:asc")
print "DEBUG:", q.path, q.params
results = q.data()
print "PLACES: %s > %s" % (query, results)
if not results:
return None
def process(result):
place = Place(locality=result["locality"],
region=result["region"],
country=result["country"])
return Venue(name=result["name"],
address=result["address"],
place=place,
metadata=result,
location=LatLon(result["latitude"], result["longitude"]))
return process(results[0]) if limit == 1 else map(process, results)
| UTF-8 | Python | false | false | 6,859 | py | 32 | places.py | 30 | 0.589299 | 0.580114 | 0 | 214 | 31.03271 | 78 |
spider17joker/sanji-develop | 1,254,130,455,268 | e699ae2a54aace792aa4f5f748d0686df09f3182 | 10ebacee368b089f5998ba061549eaa9d81712e7 | /genian/Agent/gnagent.py | 80673e4bc0e24bb48560094d419cb92d99a36a88 | [
"LicenseRef-scancode-other-permissive",
"Python-2.0"
]
| permissive | https://github.com/spider17joker/sanji-develop | d09fa754cf16aef45be67dbdcfa6e23bce00ad36 | a498f583127fffc75475c30ad873f00fed3137e1 | refs/heads/master | 2022-12-02T09:11:12.886654 | 2022-11-06T05:53:12 | 2022-11-06T05:53:12 | 85,659,009 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# 한글 주석 사용 가능하도록 encoding 추가
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import platform
import signal
import errno
import json
import hashlib
import select
import socket
import struct
import sqlite3
import subprocess
import threading
import xml.etree.ElementTree as ET
from datetime import timedelta
import Queue
import getpass
import time
import datetime
import shutil
import logging
from gnprocess import get_proc_list
import gntools
from gnconfigure import (
NotifyAgentDoNowFlag,
notify_q,
agent_notify_id_dict,
NotifyIdRegisterReqFlag,
)
from gnschedule import GnScheduleThread
from Lib.notifyifchange import net_if_change_check
from Lib.networkinfo import NetworkInfo
from Lib.debug import set_logging, get_log, set_logging_parameter
import Lib.encoding as encoding
import Lib.platform_profile as platform_profile
from include.common import (
PATH_VERIFY,
PATH_VERIFY_TRUNC,
AgentInstallPathInfo,
plugin_name,
)
import include.constants as CONST
import Lib.crypto as crypto
import Lib.fileversioninfo as version
# namedpipe
NAMEDPIPE_AGENT = 'agent_pipe'
NAMEDPIPE_START = 'start_pipe'
SIZE_UPDATE_MAX = 200
# Check Tool
IDS_CHECK_TOOL = "command -v "
IDS_DEVNULL = " >/dev/null"
ID_COMMAND = 0
ID_COMMAND_OPT_1 = 1
ID_COMMAND_OPT_2 = 2
# SOAP Function ID
ID_SOAP_LOGON = 0
ID_SOAP_GETMYPROFILE = 1
ID_SOAP_UPDATEINFO = 2
ID_SOAP_PROFILERESULT = 3
ID_SOAP_INSTALLINFO = 4
ID_SOAP_CUSTOMINFO = 5
# SOAP Function ID
IDS_SOAP = [
"logon",
"getmyprofile",
"updateinfo",
"profileresult",
"installinfo",
"custominfo",
]
# System application ID
ID_COMM = 0
# System application string
IDS_COMM_WGET = "wget"
IDS_COMM_CURL = "curl"
ID_RESULT_FAIL = 0
ID_RESULT_SUCCESS = 1
ID_RESULT = 0
ID_RESULT_STR = 1
soapStub = {
ID_SOAP_LOGON: [
"urn:logon",
"nodeid",
"mac",
"macspoof",
"hwstr",
"name",
"platform",
"osid",
"locale",
"ipaddr"
],
ID_SOAP_GETMYPROFILE: [
"urn:getmyprofile",
"nodeid",
"requst",
"version"
],
ID_SOAP_UPDATEINFO: [
"urn:update-information",
"infolist"
],
ID_SOAP_PROFILERESULT: [
"urn:profileresult",
"nodeid",
"err",
"pfres"
],
ID_SOAP_INSTALLINFO: [
"urn:installinfo",
"nodeid",
"ipaddr",
"swtype",
"newinstall"
],
ID_SOAP_CUSTOMINFO: [
"urn:getcustominfo",
"ipaddr",
],
}
# HTTP Header String
HTTP_HEAD = """' --no-cache -H "Content-Type:text/xml;charset=UTF-8" -H "SOAPAction: """
# SOAP String
SOAP_HEAD = """<soapenv:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn="urn:ctdagt">\
<soapenv:Header/>\
<soapenv:Body>\
"""
SOAP_TAIL = """</soapenv:Body>\
</soapenv:Envelope>
"""
AGENT_ACTION_INTERVAL = 0 # 액션수행 설정 - 수행주기
AGENT_ACTION_PLUGININFO = 1 # 액션수행 설정 - 플러그인 정보
AGENT_ACTION_PLUGINCONF = 2 # 액션수행 설정 - 플러그인 설정
PLUGIN_ACTION_INTERVALTYPE = 'action-intervaltype' # 수행주기 옵션
PLUGIN_ACTION_INTERVAL = 'action-interval' # 수행주기
PLUGIN_ACTION_TIMEGROUP = 'action-timegroup' # 수행시간대
PLUGIN_ACTION_RETRYTIME = 'action-retrytime' # 재시도시간
PLUGIN_ACTION_RETRYLIMIT = 'action-retrylimit' # 재시도회수
PLUGIN_ACTION_TIMETYPE = 'actiontime-type'
PLUGIN_ACTION_TIMEDATE = 'actiontime-date'
PLUGIN_ACTION_TIMEDOM = 'actiontime-dom'
PLUGIN_ACTION_TIMEDOW = 'actiontime-dow'
PLUGIN_ACTION_TIMEWOM = 'actiontime-wom'
PLUGIN_ACTION_TIMEDOW2 = 'actiontime-dow2'
PLUGIN_ACTION_TIMETIME = 'actiontime-time'
PLUGIN_ACTION_INTERVALTYPE_ONTIME = '1' # 한번만 수행
PLUGIN_ACTION_INTERVALTYPE_FIXEDTIME = '2' # 지정시각 수행
PLUGIN_ACTION_INTERVALTYPE_PERIODRUN = '3' # 주기적 수행
PLUGIN_ACTION_INTERVALTYPE_ALWAYSRUN = '4' # 항상 수행
PLUGIN_ACTION_INTERVALTYPE_OSSTART = '5' # 윈도우 시작시 한번만 수행
# 에이전트 수행 결과 타입
GN_AGENT_RESULTTYPE_PMS = 0 # PMS
GN_AGENT_RESULTTYPE_ACTION = 1 # 액션
GN_AGENT_RESULTTYPE_SMS = 2 # SMS (사용안함)
GN_AGENT_RESULTTYPE_CONST = 3 # 제약사항 (사용안함)
GN_AGENT_RESULTTYPE_PMSHISTORY = 4 # PMS 내역
# 에이전트 수행 결과 코드
GN_AGENT_RESULTCODE_SUCCESS = 10 # 성공
GN_AGENT_RESULTCODE_GENERAL = 11 # 실패
GN_AGENT_RESULTCODE_NOTSUPPORTED = 12 # 기능 지원안함
GN_AGENT_RESULTCODE_NEEDREBOOT = 13 # 리부팅 필요
# 프로파일 작업 타입
GN_PROFILE_WORKTYPE_PMS = 1 # PMS
GN_PROFILE_WORKTYPE_ACTION = 2 # 액션
GN_PROFILE_WORKTYPE_CONST = 3 # 제약사항 (사용안함)
GN_PROFILE_WORKTYPE_ELASTICSEARCH = 4 # 액션수행결과를 Elasticsearch 로 전송
# 프로파일 결과
GN_PROFILE_RESULT_PFTYPE = "pftype" # profileresult_t.pftype
GN_PROFILE_RESULT_PFID = "pfid" # profileresult_t.pfid
GN_PROFILE_RESULT_PFGROUPID = "pfgorupid" # profileresult_t.result_code
GN_PROFILE_RESULT_PFGROUPOP = "groupop" # profileresult_t.result_msg
GN_PROFILE_RESULT_RESULTCODE = "result-code" # profileresult_t.result_code
GN_PROFILE_RESULT_RESULTMSG = "result-msg" # profileresult_t.result_msg
GN_PROFILE_RESULT_ACTIVE = "active" # profileresult_t.active
KEYSTR_ACTION_RESULT = 'send.action.result'
KEYSTR_PLUGIN_RESULT = 'send.plugins.data'
KEYSTR_NODEID = 'node.id'
KEYSTR_NODE_NOUPDATE = 'node.aanoupdate'
KEYSTR_PROFILE_INTERVAL = 'profile.policyinterval' # 정책 수신 주기
KEYSTR_PROFILE_AUTHSTATUS = 'profile.authstatus'
KEYSTR_PROFILE_VERSION = 'profile.version'
NOTIFYID_REGISTER_REQ = 11 # 재인증
NOTIFYID_PMSNOW = 36 # PMS 수행
NOTIFYID_ACTIONNOW = 37 # 액션 수행
NOTIFYID_SYSCONSTNOW = 38 # 제약사항 수행
NOTIFYID_SMSNOW = 39 # SMS 수행
NOTIFYID_AGENTDONOW = 44 # PMS/액션/제약사항/SMS/라이센스 검사(PDMC) 수행
NOTIFYID_AGENTRUNSTAT = 45 # PMS/액션/라이센스 검사(PDMC) 작업 상태
NOTIFYID_NEWNOTICE = 46 # 새로운 공지사항 존재
NOTIFYID_NEWMESSAGE = 53 # 새로운 알림 메시지
NOTIFYID_NODEAUTHCHANGED = 55 # 인증 상태 변경
NOTIFYID_SENDDEBUGFILE = 66 # 지정된 목적지로 디버그 로그 전송
NOTIFYID_SELECTACTIONNOW = 72 # 지정 액션 수행
NOTIFYID_AGENTSTART = 73 # 에이전트 서비스 시작
NOTIFYID_AGENTSTOP = 74 # 에이전트 서비스 중지
NOTIFYID_AGENTUPDATE = 75 # 에이전트 업데이트
NOTIFYID_RECOVERY = 76 # 센서 장애복구
NOTIFYID_PERMMESSAGE = 77 # 고정메시지 업데이트
NOTIFYID_NODEPOISONSTATUS = 81 # 노드차단상태
NOTIFYID_LICSWPERMMESSAGE = 85 # 라이센스 고정메시지 업데이트
LOCAL_NOTIFYID_AGENTUPDATE = 900 # 에이전트 주기적 업데이트
# timeobj
TIMEOBJ_ANYTIME = 'ⓐANYTIME'
# dataservice
dataservice = None
# threads
threads = {}
# stop event
stop_event = None
threadDelegate = None
class Shell:
'''
Shell
'''
@staticmethod
def rm(name, is_dir=False):
'''
rm
'''
if (is_dir == True):
command = ''.join(["rm -rf " + name + "/*"])
else:
command = ''.join(["rm -f " + name])
list_result = Shell.execute(command)
if (list_result[ID_RESULT] == ID_RESULT_FAIL):
LOG.error("rm command fail. cmd='" + command + "'")
@staticmethod
def execute(command):
'''
execute
'''
popen = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(stdoutdata, stderrdata) = popen.communicate()
if (len(stderrdata) == 0):
return [ID_RESULT_SUCCESS, stdoutdata]
else:
return [ID_RESULT_FAIL, stderrdata]
class Dataservice:
''''''
conn = None
def __init__(self):
''''''
self.path = AgentInstallPathInfo()
self.logger = logging.getLogger("crumbs")
self.conn = sqlite3.connect(self.path.get_database_path(), isolation_level=None, check_same_thread=False)
query = 'CREATE TABLE IF NOT EXISTS INFODATA (KEYPATH varchar(1024), VALUE Text, Primary Key(KEYPATH))'
cur = self.conn.cursor()
cur.execute(query)
def close(self):
self.conn.close()
def get_data(self, keypath, default=''):
''''''
data = [keypath]
cur = self.conn.cursor()
query = 'SELECT VALUE FROM INFODATA WHERE KEYPATH = ?'
cur.execute(query, data)
rows = cur.fetchall()
for row in rows:
if not row[0]:
return default
else:
return row[0]
return default
def set_data(self, keypath, value):
''''''
data = (keypath, value)
cur = self.conn.cursor()
query = 'INSERT OR REPLACE INTO INFODATA (KEYPATH, VALUE) VALUES (?, ?)'
cur.execute(query, data)
def del_data(self, keypath):
''''''
data = [keypath]
cur = self.conn.cursor()
query = 'DELETE FROM INFODATA WHERE KEYPATH = ?'
cur.execute(query, data)
def delete_action_result_data(self):
data = []
cur = self.conn.cursor()
query = 'select * from infodata where KEYPATH like "keypath%"'
cur.execute(query, data)
rows = cur.fetchall()
for row in rows:
if not row[0]:
return None
else:
query = 'delete from infodata where KEYPATH=' + str('\'') + str(row[0].decode('utf-8') + str('\''))
cur.execute(query, data)
return None
class pluginbase(Shell):
"""
profile_result : [0] = PFTYPE_?, [1] = result-code, [2] = result-msg
"""
profile_result = {}
"""
updateinfo result
"""
list_data = []
"""
plugin config
"""
pluginconfig = None
"""
action 수행결과가 변경되지 않더라도 결과 전송
"""
always_profileresult_send = False
def __init__(self):
''''''
self.create_dicts()
def create_dicts(self):
''''''
pass
def run(self):
''''''
pass
def check_tool(self, toolid):
return (os.system(''.join([IDS_CHECK_TOOL, toolid, IDS_DEVNULL])) == 0)
def set_profileresult(self, worktype, resultcode, resultmsg):
''''''
self.profile_result[GN_PROFILE_RESULT_PFTYPE] = worktype
self.profile_result[GN_PROFILE_RESULT_RESULTCODE] = resultcode
self.profile_result[GN_PROFILE_RESULT_RESULTMSG] = resultmsg
class WaitableEvent:
''''''
def __init__(self):
''''''
self._read_fd, self._write_fd = os.pipe()
def wait(self, timeout=None):
''''''
rfds, wfds, efds = select.select([self._read_fd], [], [], timeout)
return self._read_fd in rfds
def isSet(self):
''''''
return self.wait(0)
def clear(self):
''''''
if self.isSet():
os.read(self._read_fd, 1)
def set(self):
''''''
if not self.isSet():
os.write(self._write_fd, b'1')
def fileno(self):
'''
Return the FD number of the read side of the pipe, allows this object to
be used with select.select().
'''
return self._read_fd
def __del__(self):
''''''
os.close(self._read_fd)
os.close(self._write_fd)
class GnThreadDelegate:
'''
별도의 parameter를 필요로하지 않는 작업들을 수행
thread함수 내에서 수행되는 것들로
thread stopwait, join 등을 사용하지 않는다.
'''
def policyinterval(self, gnthread):
if gnthread.name != 'policyinterval':
return
agentprofile()
@staticmethod
def policyrestart(interval):
if threads == None:
return
thread = threads.get('policyinterval')
interval = interval * 60
if thread == None:
thread = GnThread('policyinterval', stop_event,
interval, threadDelegate.policyinterval)
thread.start()
threads['policyinterval'] = thread
dataservice.set_data(KEYSTR_PROFILE_INTERVAL, interval)
return
value = dataservice.get_data(KEYSTR_PROFILE_INTERVAL, '0')
if interval != int(value):
"""
thread내에서 호출되는 함수이므로 시간값만 변경 (분단위)
"""
thread.timeout = interval
dataservice.set_data(KEYSTR_PROFILE_INTERVAL, interval)
class GnThread(threading.Thread):
''''''
name = None
stop = None # app 종료 이벤트
timeout = None # thread 시작 대기 시간
isexit = False # thread 중지 여부
isrun = False
isstart = False
delegate = None
def __init__(self, name, stop, timeout, delegate=None):
''''''
self.name = name
self.timeout = timeout
self.stop = stop
self.event = WaitableEvent()
self.event.clear()
self.isrun = False
self.delegate = delegate
self.log = get_log()
threading.Thread.__init__(self, name=self.name)
def __delete(self):
threading.Thread.__delete(self)
self.isstart = False
def start(self):
if not self.isstart:
threading.Thread.start(self)
self.isstart = True
def stopwait(self):
''''''
self.isexit = True
self.event.set()
def work(self):
''''''
pass
def finish(self):
''''''
self.isrun = False
self.isstart = False
def run(self):
''''''
while not self.stop.is_set() and self.isexit == False:
''''''
if (self.timeout != 0):
self.event.wait(self.timeout)
if not self.stop.is_set() and self.isexit == False:
try:
self.work()
if self.delegate != None:
self.delegate(self)
except Exception as e:
self.log.error("Thread run failed. ERROR=" + str(e) + ", name=" + str(self.name))
raise
            # if timeout == 0, run only once
if (self.timeout == 0):
break
self.isrun = False
self.isstart = False
self.finish()
class SoapDelegate:
    '''
    Parser for the XML portion of SOAP response packets.
    '''
def extract_dataset(self, attrib):
'''
extract_dataset
'''
tree = ET.parse(attrib)
root = tree.getroot()
return root[1][0]
def logon(self):
'''
logon
'''
try:
dataset = self.extract_dataset(IDS_SOAP[ID_SOAP_LOGON])
for d in dataset:
''''''
if (d.tag == "nodeid"):
self.dict_env["nodeid"] = d.text
if (d.tag == "result"):
''''''
if (d.text == "0"):
LOG.info("LOGON SUCCEED, NODEID : " + self.dict_env["nodeid"])
else:
LOG.error("LOGON FAIL")
except Exception as msg:
LOG.error("Exception MSG=" + str(msg))
def getmyprofile(self):
"""
SOAP_GETMYPROFILE 에 대한 리턴 값으로 아래와 같은 XML Element 객체를 리턴 받아서 파싱한다.
각 요소에 대해서 파싱하여 저장하지 않고 필요한 Element 만 파싱하여 저장한다.
:return: XML Element Dictionary Objects
"""
dataset = self.extract_dataset(IDS_SOAP[ID_SOAP_GETMYPROFILE])
self.dict_env['aalist'] = {}
self.dict_env['tolist'] = {}
LOG.debug('agent profile received')
for d in dataset:
''''''
tag = ''
text = ''
if d == None:
continue
tag = '' if type(d.tag) != str else str(d.tag)
text = '' if type(d.text) != str else str(d.text)
if d.tag == "policyinterval":
                # policy change check interval (hours)
GnThreadDelegate.policyrestart(int(d.text))
if d.tag == "kainterval":
dataservice.set_data(CONST.KEYSTR_NODE_KEEPALIVE, d.text)
elif d.tag == "aalist":
''''''
for x in d:
''''''
if x.tag == "pfid":
''''''
action_elem = {}
action_plugin = {}
pluginconf = {}
self.dict_env['aalist'][x.text] = [
                            action_elem,  # execution schedule
                            action_plugin,  # plugin information
                            pluginconf]  # plugin configuration
elif x.tag == "action-plugin":
''''''
for y in x:
action_plugin[y.tag] = y.text
elif x.tag == "pluginconf":
''''''
pluginkey = ""
for y in x:
''''''
if y.tag == "key":
''''''
pluginkey = y.text
elif pluginkey:
pluginconf[pluginkey] = y.text
pluginkey = ""
else:
action_elem[x.tag] = x.text
elif d.tag == "tolist":
''''''
for x in d:
if x.tag == 'name':
timeobj = {}
self.dict_env['tolist'][x.text] = timeobj
else:
timeobj[x.tag] = x.text
elif d.tag == "aa-noupdate":
                '''
                store the aa-noupdate value in the DB.
                '''
self.dict_env['aa-noupdate'] = d.text
dataservice.set_data(KEYSTR_NODE_NOUPDATE, self.dict_env.pop('aa-noupdate'))
elif d.tag == "version":
dataservice.set_data(KEYSTR_PROFILE_VERSION, d.text)
def updateinfo(self):
'''
updateinfo
'''
pass
def profileresult(self):
'''
profileresult
'''
pass
def installinfo(self):
        '''
        Fetches the complete agent-related information from the server via
        the installinfo SOAP API and extracts only the Linux agent entries.
        '''
self.dict_env['installinfo'] = {}
dataset = self.extract_dataset(IDS_SOAP[ID_SOAP_INSTALLINFO])
for vinfos in dataset.findall('vinfo'):
if "GenianLinux" == vinfos.find('modulename').text:
vinfo_list = list(vinfos)
for i in vinfo_list:
self.dict_env['installinfo'][i.tag] = i.text
def custominfo(self):
self.dict_env['custominfo'] = {}
dataset = self.extract_dataset(IDS_SOAP[ID_SOAP_CUSTOMINFO])
results = []
for el in dataset:
results.append((el.tag, el.text))
self.dict_env['custominfo'] = results
def __init__(self):
''''''
self.dict_env = {
}
class Soap:
    '''
    Builds SOAP requests (creates the body/XML to be sent with the
    wget/curl command).
    '''
def create_delegate(self):
'''
create_delegate
'''
self.delegate = SoapDelegate()
self.delegate.dict_env = self.dict_env
self.dict_soap[ID_SOAP_LOGON] = self.delegate.logon
self.dict_soap[ID_SOAP_GETMYPROFILE] = self.delegate.getmyprofile
self.dict_soap[ID_SOAP_UPDATEINFO] = self.delegate.updateinfo
self.dict_soap[ID_SOAP_PROFILERESULT] = self.delegate.profileresult
self.dict_soap[ID_SOAP_INSTALLINFO] = self.delegate.installinfo
self.dict_soap[ID_SOAP_CUSTOMINFO] = self.delegate.custominfo
def create_dicts(self):
'''
create_dicts
'''
self.dict_soap = {}
self.dict_env = {}
def set_url(self):
'''
set_url
'''
self.soap_url = "http://" + gntools.get_soap_server_ip_port() + "/agt"
def create_xml(self, soap_body):
'''
create_xml
'''
return ''.join([SOAP_HEAD, soap_body, SOAP_TAIL])
def add_sub_element(self, parent, elem_name, elem_text=None):
'''
add_sub_element
'''
elem = ET.SubElement(parent, elem_name)
elem.text = elem_text
return elem
def create_elements(self, soap_id, parent, elem, params, keyquery=""):
'''
create_elements
'''
if (soap_id == ID_SOAP_UPDATEINFO):
''''''
if (elem == "infolist"):
''''''
for paramlist in params:
''''''
for param in paramlist:
''''''
sub = self.add_sub_element(parent, elem)
self.add_sub_element(sub, "nodeid", self.dict_env.get("nodeid"))
self.add_sub_element(sub, "infoid", param[0])
self.add_sub_element(sub, "keyquery", keyquery)
# 2 = ILF_HAVESYSINFO
self.add_sub_element(sub, "flags", "2")
list_column = param[1]
for column in list_column:
self.add_sub_element(sub, "columnlist", column)
list_data = param[2]
for list_sub_data in list_data:
''''''
child = self.add_sub_element(sub, "datasetlist")
for data in list_sub_data:
self.add_sub_element(child, "dataset", data)
self.add_sub_element(child, "flags", "0")
elif soap_id == ID_SOAP_PROFILERESULT:
''''''
if elem == "nodeid":
self.add_sub_element(parent, elem, self.dict_env.get("nodeid"))
elif elem == "err":
''''''
sub = ET.SubElement(parent, elem)
self.add_sub_element(sub, "type", str(GN_AGENT_RESULTTYPE_ACTION))
self.add_sub_element(sub, "errorcode", str(GN_AGENT_RESULTCODE_SUCCESS))
self.add_sub_element(sub, "errormsg", "")
self.add_sub_element(sub, "nexttime", "0")
elif elem == "pfres":
''''''
for paramlist in params:
sub = ET.SubElement(parent, elem)
for p, v in paramlist.items():
''''''
self.add_sub_element(sub, p, str(v))
else:
self.add_sub_element(parent, elem, params)
def call(self, soap_id, params, keyquery=""):
'''
call
'''
delegate = self.dict_soap.get(soap_id)
first = True
i = 0
for elem in soapStub[soap_id]:
''''''
if (first):
''''''
root = ET.Element(elem)
parent = root
if soap_id == ID_SOAP_LOGON:
parent = ET.SubElement(root, IDS_SOAP[ID_SOAP_LOGON])
first = False
else:
''''''
if soap_id == ID_SOAP_UPDATEINFO or soap_id == ID_SOAP_PROFILERESULT:
self.create_elements(soap_id, parent, elem, params, keyquery)
else:
self.create_elements(soap_id, parent, elem, params[i])
i = i + 1
xml = self.create_xml(ET.tostring(root))
cmd = self.tool.dict_tool[ID_COMM]
param = self.tool.dict_cmd[ID_COMM][cmd]
soapfile = 'soapfile' + str(datetime.datetime.now().strftime('%Y%m%d%H%M%S%f'))
sendfile = open(soapfile, 'w')
sendfile.write(xml)
sendfile.close()
str_list = [
param[ID_COMMAND],
soapfile, HTTP_HEAD,
delegate.__name__,
'" ',
self.soap_url,
param[ID_COMMAND_OPT_1],
delegate.__name__
]
request = ''.join(str_list)
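        # With wget selected, the assembled command looks roughly like:
        #   wget -q --post-file='<soapfile>' --no-cache
        #     -H "Content-Type:text/xml;charset=UTF-8" -H "SOAPAction: logon"
        #     http://<server>/agt -O logon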
LOG.info("Request CMD=" + str(request))
list_result = Shell.execute(request)
os.unlink(soapfile)
if list_result[ID_RESULT] == ID_RESULT_SUCCESS:
''''''
delegate()
Shell.rm(IDS_SOAP[soap_id])
return ID_RESULT_SUCCESS
return ID_RESULT_FAIL
def __init__(self, tool):
'''
__init__
'''
self.create_dicts()
self.create_delegate()
self.tool = tool
self.set_url()
class Tool:
'''
Tool
'''
def create_dicts(self):
'''
create_dicts
'''
self.dict_cmd = {
ID_COMM: {
IDS_COMM_WGET: ["wget -q --post-file='", " -O "],
IDS_COMM_CURL: ["curl -s --data '", " -o "]
}
}
self.dict_tool = {
ID_COMM: None
}
def create_tool(self):
'''
create_tool
'''
for tool_id, cmds in self.dict_cmd.items():
'''
'''
for cmd in cmds:
''''''
if os.system(''.join([IDS_CHECK_TOOL, cmd, IDS_DEVNULL])) != 0:
continue
self.dict_tool[tool_id] = cmd
break
result = True
for tool_id, cmd in self.dict_tool.items():
''''''
if cmd == None:
''''''
result = False
return result
def __init__(self):
'''
__init__
'''
self.create_dicts()
class Util:
''''''
@staticmethod
def weekofmonth(year, month, day):
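        # This appears to be a Zeller-style congruence: `week` estimates the
        # weekday of the 1st of the month, and the week-of-month containing
        # `day` is then derived from it.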
yearhigh = year / 100
yearlow = year % 100
if month <= 2:
yearlow -= 1
month += 12
week = (((21*yearhigh/4) + (5*yearlow/4) + (26*(month+1)/10) + 1 - 1) % 7)
return ((day-1+week)/7) if week > 3 else ((day-1+week)/7)+1
@staticmethod
def lastweekofmonth(year, month):
days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
leap = 0
if (2 == month) and (0 == (year % 4) and 0 != (year % 100) or 0 == (year % 400)):
leap = 1
return Util.weekofmonth(year, month, days[month-1]+leap)
@staticmethod
def islastweekofmonth(year, month, day):
wom = Util.weekofmonth(year, month, day)
if wom > 0:
wom = pow(2, wom - 1)
if Util.lastweekofmonth(year, month) == wom:
return wom | 0x00000020
return wom
class Node:
def __init__(self):
self.net_if = {}
self.obj_network_info = None
self.soap_server_ip = None
self.soap_server_port = None
self.net_if_keys = ['ipstr', 'ip', 'name', 'mac']
self.local_ip_ntoa = None
self.local_ip_aton = None
self.ip_addr = None
self.mac_addr = None
self.nodeid = None
self.hostname = None
self.platform = None
self.osid = None
self.lang = None
def create_net_info(self):
local_ip_list = self.obj_network_info.get_local_ip_from_socket(self.soap_server_ip, self.soap_server_port)
if local_ip_list is None:
return None
for idx in range(len(local_ip_list)):
self.net_if[self.net_if_keys[idx]] = local_ip_list[idx]
return True
def create_node_info(self):
if self.create_net_info():
self.ip_addr = str(self.net_if["ip"])
self.mac_addr = self.net_if["mac"]
else:
self.ip_addr = '0.0.0.0'
self.mac_addr = 0
self.hostname = socket.gethostname()
self.platform = platform.system()
self.lang = encoding.get_system_lang()[0]
self.osid = platform_profile.get_system_osid()
LOG.info("HOST NAME={0}, PLATFORM={1}, LANG={2}, OSID={3}".format(self.hostname, self.platform, self.lang, self.osid))
def node_info_setup(self):
"""
각종 설정과 외부 함수 호출에 대해서 __init__함수와 분리하여 처리한다.
"""
self.obj_network_info = NetworkInfo()
self.soap_server_ip = gntools.get_soap_server_ip_port()
self.soap_server_port = 80
ip_info = self.obj_network_info.get_local_ip_from_socket(self.soap_server_ip, self.soap_server_port)
if ip_info is None:
return
self.local_ip_ntoa = ip_info[0]
self.local_ip_aton = ip_info[1]
class GnNotify(GnThread):
"""
GnNotify 객체의 Method Name을 agent_notify_id_dict{} 에서 사용하므로, Method Name 변경시
agent_notify_id_dict{} 에서도 함께 변경 되어야 한다.
"""
def __init__(self, name, stop, interval, sch_q, is_unittest=None):
self.interval = interval
self.jobs = notify_q
self.sch = sch_q
self.case_name = ""
self.case = self.case_default
self.noti_id = (0,)
self.obj_network_info = None
self.local_ip_netinfo = []
self.log = get_log()
if is_unittest is None:
GnThread.__init__(self, name, stop, self.interval)
def work(self):
while True:
try:
notifyId = self.jobs.get()
result = self.NotifyReceive(notifyId)
finally:
self.jobs.task_done()
def setup_network_info(self):
self.obj_network_info = NetworkInfo()
for el in self.obj_network_info.get_local_ip_from_route():
if el.get('gateway'):
self.local_ip_netinfo = el
break
def AgentActionDoNow(self):
"""
에이전트 액션 즉시 수행
plugin 중지
"""
hplugin = threads['pluginmgr']
hplugin.del_agentaction()
agentlogon()
if node.nodeid == None:
''''''
LOG.error("NodeID doesn't exist.")
agentexit()
return
agentprofile()
        # reconfigure the plugins
hplugin.set_agentaction(soap.dict_env['aalist'], soap.dict_env['tolist'])
hplugin.start()
def AgentDoNow(self, id):
        '''
        Handles the immediate-execution commands among the node-targeted
        job instructions.
        '''
        if int(id, 16) & NotifyAgentDoNowFlag.NOTYFLAGS_ACTIONNOW:
            """
            run agent actions immediately
            """
            LOG.info('Agent service event (Notify) received that is Perform agent action immediately.')
            self.AgentActionDoNow()
        if int(id, 16) & NotifyAgentDoNowFlag.NOTYFLAGS_PMSNOW:
            """
            run the operating system update immediately
            """
            LOG.info('Agent service event (Notify) received that is Immediately update operating system. Not implemented.')
        if int(id, 16) & NotifyAgentDoNowFlag.NOTYFLAGS_SMSNOW:
            """
            send the node asset information immediately
            """
            LOG.info('Agent service event (Notify) received that is Immediate transfer of node asset information.')
            self.sch.put(NotifyAgentDoNowFlag.NOTYFLAGS_PMSNOW)
        if int(id, 16) & NotifyAgentDoNowFlag.NOTYFLAGS_PROFILE:
            """
            reapply the node policy
            """
            LOG.info('Agent service event (Notify) received that is Reapply the node policy.')
            agentprofile()
def NotifyReceive(self, notify_id):
"""
pythonic하게 switch문 구현함.
:param notifyId : <type 'tuple'>: (44, '0x400')
:return: NotifyAgentDoNowFlag ID 값에 맵핑되는 함수의 주소를 리턴함.
"""
try:
self.case_name = "".join([agent_notify_id_dict.get(notify_id[0])])
except TypeError as e:
LOG.info('Agent raise exception ERRMSG=' + str(e))
self.case_name = "default"
if self.case_name in ("register_req", "agent_do_now"):
"""
에이전트 수행결과 관련 DB 값을 삭제함.
"""
dataservice.delete_action_result_data()
self.case = getattr(self, self.case_name, self.case_default)
self.noti_id = notify_id
return self.case()
def register_req(self):
"""
에이전트 재등록 요청
noti_id : (event_id, event_msg)
event_id : 11(gnagent.NOTIFYID_REGISTER_REQ)
event_msg : 0x0 : 정책서버로부터오는 이벤트
0x100 : notifyifchange 모듈에서 인터페이스 변경시 발생.
0x101 : KeepAlive 전송하기 전에 네트워크 체크시 실패하면 발생함.
"""
LOG.info('GnAgent received a center reconnection request message.')
netinfo = self.obj_network_info.get_local_ip_from_route()
if not len(netinfo):
LOG.info("Could not find network information. NOTIFY_ID=" + str(self.noti_id))
return True
result = {}
for el in netinfo:
if el.get('gateway'):
result = el
        if self.noti_id[1] in ("0x100", "0x101"):
            if result.get('ip') == self.local_ip_netinfo.get('ip'):
LOG.info('There are no changes to the local IP. ( Local IP = %s, IF_NAME = %s)' % (self.local_ip_netinfo.get('ip'), self.local_ip_netinfo.get('iface')))
return True
self.local_ip_netinfo = result
agentlogon()
if not len(node.nodeid):
"""
Center Logon 실패시 추가 작업이 필요함.
- 센터 재 접속 타이머 설정
- 에이전트 failsafe 타이머 설정
"""
LOG.error("NodeID doesn't exist.")
return False
agentprofile()
def agent_update(self):
"""
에이전트 업데이트
"""
LOG.info('Agent service update event (Notify) received.')
add_jobs(jobs_q, NOTIFYID_AGENTUPDATE)
def agent_do_now(self):
"""
에이전트 액션 즉시 수행
"""
LOG.info('Agent service event (Notify) received that is Perform agent action immediately.')
self.AgentDoNow(self.noti_id[1])
def node_auth_changed(self):
"""
에이전트 인증 상태 변경
"""
LOG.info('Agent service event (Notify) received that is Changing agent authentication status.')
agentprofile()
def case_default(self):
"""
에이전트 이벤트 예외 처리를 위한 케이스
"""
LOG.info('An unknown event has been received. (Notify ID = %s,Flage = %s)' % (self.noti_id, self.case_name))
class Namedpipe(GnThread):
    '''
    Receives agent notify events over a named pipe (FIFO): each message is
    an 8-byte struct('II') header of (event id, payload length), followed
    by the payload bytes.
    '''
name = None
interval = None
hpipe = None
def __init__(self, name, stop, interval, notify_q):
self.path = AgentInstallPathInfo()
self.name = ''.join([self.path.get_agent_path(), "/", name])
self.interval = interval
self.stop_event = stop
self.jobs = notify_q
try:
Shell.rm(self.name, False)
os.mkfifo(self.name, 0o666)
os.chmod(self.name, 0o666)
self.hpipe = os.open(self.name, os.O_RDWR | os.O_NONBLOCK)
except OSError as e:
LOG.error('Failed to create event socket. (ERROR = %s)' % (str(e)))
GnThread.__init__(self, name, stop_event, self.interval)
def work(self):
LOG.info('The event reception job has started.')
while not self.stop_event.is_set() and self.isexit == False:
try:
rlist = [self.hpipe]
wlist = []
xlist = []
rlist, wlist, xlist = select.select(rlist, wlist, xlist)
if self.hpipe in rlist:
try:
buf = os.read(self.hpipe, 8)
except OSError as e:
if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
buf = None
else:
LOG.info('Event reception failed. (ERROR = %s)' % (str(e)))
raise
                    '''
                    this block does not run on a fixed period; the early
                    return was removed when the select() call was added
                    above.
                    '''
if buf is None or len(buf) == 0:
continue
pipeformat = 'II'
                    # unpack the message type and the payload length
data = struct.unpack(pipeformat, buf)
if data[0] == 0:
continue
if data[1] > 0:
buf = os.read(self.hpipe, data[1])
pipeformat = str(data[1]) + 's'
flag = struct.unpack(pipeformat, buf)
eventid = data[0]
notifys = (eventid, flag[0])
add_jobs(self.jobs, notifys)
except OSError as e:
if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK or e.errno == errno.EINTR:
LOG.info("Select.error exception ERR=" + str(e))
else:
LOG.error("Named Pipe exception ERR=" + str(e))
raise
def finish(self):
''''''
os.close(self.hpipe)
Shell.rm(self.name, False)
class PluginResultsendThread(GnThread):
''''''
interval = 10
def __init__(self, stop, interval):
''''''
self.interval = interval
GnThread.__init__(self, 'Plugin_Result_sender', stop, self.interval)
LOG.info(''.join(["Plugin Result Sender success. INTERVAL=%d" % self.interval]))
def work(self):
''''''
data = dataservice.get_data(KEYSTR_PLUGIN_RESULT)
if len(data) == 0:
return
buf = json.loads(data)
keyquery = ''
result = soap.call(ID_SOAP_UPDATEINFO, buf, keyquery)
if result != ID_RESULT_SUCCESS:
''''''
LOG.debug('Update Info send failed.')
return
dataservice.del_data(KEYSTR_PLUGIN_RESULT)
LOG.debug('Update Info send success.')
# action result send
data = dataservice.get_data(KEYSTR_ACTION_RESULT)
if len(data) == 0:
return
buf = json.loads(data)
keyquery = ''
result = soap.call(ID_SOAP_PROFILERESULT, buf, keyquery)
if result != ID_RESULT_SUCCESS:
''''''
LOG.debug('Action Result send failed.')
return
dataservice.del_data(KEYSTR_ACTION_RESULT)
LOG.debug('Action Result send success.')
class PluginThread(GnThread):
''''''
actionid = 0
pluginid = 0
modulename = ''
pluginname = ''
pluginconf = None
timeobject = None
stop = None
module_plugin = None
module_class = None
config = []
def __init__(self, actionid, pluginid, pluginname, pluginconf, stop):
self.actionid = actionid
self.pluginid = pluginid
self.modulename = None
self.pluginname = pluginname
self.pluginconf = pluginconf
self.stop = stop
for each in plugin_name:
if str(self.pluginid) in each[0]:
self.modulename = "Plugin" + "." + "ACT" + str(self.pluginid) + "_" + each[1] + "." + pluginname
break
self.module_plugin = __import__(self.modulename, fromlist=[self.pluginname])
self.module_class = getattr(self.module_plugin, self.pluginname)
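        # Example of the dotted path built above (hypothetical plugin): a
        # pluginid of 101 with a plugin_name entry ('101', 'Foo') and a
        # pluginname of 'Bar' imports "Plugin.ACT101_Foo.Bar" and loads Bar.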
def pluginstart(self, config, timeobject):
''''''
self.config = config
self.timeobject = timeobject
intervaltype = config[PLUGIN_ACTION_INTERVALTYPE]
interval = 0
if intervaltype == PLUGIN_ACTION_INTERVALTYPE_PERIODRUN:
            # periodic runs are specified in seconds
interval = int(config[PLUGIN_ACTION_INTERVAL])
elif intervaltype == PLUGIN_ACTION_INTERVALTYPE_FIXEDTIME:
            # fixed-time runs use a one-minute polling tick
interval = 60
elif intervaltype == PLUGIN_ACTION_INTERVALTYPE_ALWAYSRUN:
"""
수행 주기 : 항상 실행
항상 수행으로 설정할 경우, 수행 주기는 1초로 설정함.
"""
interval = 1
GnThread.__init__(self, self.pluginname, self.stop, interval)
LOG.info("Plugin init success. ACTIONID=" + str(self.actionid) +
", PLUGINID=" + str(self.pluginid) +
", NAME=" + self.pluginname +
", INTERVALTYPE=" + intervaltype +
", INTERVAL=" + str(interval))
def immediate(self):
''''''
thimmediate = threading.Thread(target=self.work, args=())
thimmediate.start()
def inschedule(self):
        '''Return True if this action should run now, per its schedule.'''
        # the schedule check ignores seconds
        # current time
nowdate = datetime.datetime.now().timetuple()
intervaltype = self.config[PLUGIN_ACTION_INTERVALTYPE]
try:
if intervaltype == PLUGIN_ACTION_INTERVALTYPE_ONTIME:
                # run-once: the thread body is invoked a single time and never again
return True
elif intervaltype == PLUGIN_ACTION_INTERVALTYPE_OSSTART:
                # run at OS startup:
                # look up the last run time; if none is recorded, always run
keypath = 'keypath.' + str(self.actionid) + '.profile.rundatetime'
rundate = dataservice.get_data(keypath)
if not rundate:
return True
                # previous run time
olddate = datetime.datetime.strptime(
rundate, '%Y%m%d%H%M%S').timetuple()
                # boot time
result = Shell.execute("cat /proc/uptime | awk -F' ' '{print $1}'")
boottime = (datetime.datetime.now() - timedelta(seconds=long(float(result[1].replace("\n", ''))))).timetuple()
if boottime > olddate:
return True
elif intervaltype == PLUGIN_ACTION_INTERVALTYPE_PERIODRUN or intervaltype == PLUGIN_ACTION_INTERVALTYPE_ALWAYSRUN:
                # periodic runs are not retried
timegrp = self.config[PLUGIN_ACTION_TIMEGROUP]
timeobj = self.timeobject[timegrp]
nowtime = nowdate.tm_hour * 60 + nowdate.tm_min
if timegrp.encode('utf8') == TIMEOBJ_ANYTIME:
return True
                # weekday bitmask values: Mon,Tue,Wed,Thu,Fri,Sat,Sun
wday = [2, 4, 8, 16, 32, 64, 1]
if (int(timeobj['day']) & wday[nowdate.tm_wday]) == wday[nowdate.tm_wday]:
if int(timeobj['fromtime']) <= nowtime and nowtime <= int(timeobj['totime']):
return True
return False
elif intervaltype == PLUGIN_ACTION_INTERVALTYPE_FIXEDTIME:
                # run at a fixed time
timetype = int(self.config[PLUGIN_ACTION_TIMETYPE])
timetime = self.config[PLUGIN_ACTION_TIMETIME].split(':')
hour = int(timetime[0])
minute = int(timetime[1])
retval = False
if timetype == 1:
                    # every day + time
retval = True
elif timetype == 2:
                    # given days of the month + time
values = self.config[PLUGIN_ACTION_TIMEDOM].split(',')
timedom = []
for val in values:
vals = val.split('-')
start = vals[0]
end = vals[1] if len(vals) == 2 else vals[0]
timedom.extend(range(int(start), int(end)+1))
for day in timedom:
                        if nowdate.tm_mday == day:
retval = True
break
elif timetype == 3:
                    # specific date + time
value = self.config[PLUGIN_ACTION_TIMEDATE]
timedate = datetime.datetime.strptime(value, '%Y-%m-%d').timetuple()
if nowdate.tm_year == timedate.tm_year and nowdate.tm_mon == timedate.tm_mon and nowdate.tm_mday == timedate.tm_mday:
retval = True
elif timetype == 4:
                    # weekday + time; Sunday=0, following the Windows convention
values = self.config[PLUGIN_ACTION_TIMEDOW].split(',')
timedow = [int(6) if int(num) == 0 else int(num) - 1 for num in values]
for wday in timedow:
if nowdate.tm_wday == wday:
retval = True
break
elif timetype == 5:
                    # week-of-month + weekday + time
values = self.config[PLUGIN_ACTION_TIMEWOM].split(',')
wom = Util.weekofmonth(
nowdate.tm_year, nowdate.tm_mon, nowdate.tm_mday)
lastwom = Util.islastweekofmonth(nowdate.tm_year, nowdate.tm_mon, nowdate.tm_mday)
for timewom in values:
if 6 == int(timewom) and lastwom > 5:
# last week
retval = True
break
elif wom == int(timewom):
retval = True
break
if retval == True:
                        # even when the week matches, the weekday must also match, so reset retval
retval = False
values = self.config[PLUGIN_ACTION_TIMEDOW2].split(',')
timedow = [int(6) if int(num) == 0 else int(num) - 1 for num in values]
for wday in timedow:
if nowdate.tm_wday == wday:
retval = True
break
                # no match: do not run, regardless of whether it already ran today
if retval == False:
return False
                # look up the last run time; if none is recorded, always run
keypath = 'keypath.' + \
str(self.actionid) + '.profile.rundatetime'
rundate = dataservice.get_data(keypath)
if not rundate:
return True
                # previous run time
olddate = datetime.datetime.strptime(rundate, '%Y%m%d%H%M%S').timetuple()
                # if the last run was before today, run again
                nowday = datetime.datetime(nowdate.tm_year, nowdate.tm_mon, nowdate.tm_mday, 0, 0, 0)
                oldday = datetime.datetime(olddate.tm_year, olddate.tm_mon, olddate.tm_mday, 0, 0, 0)
if oldday < nowday:
return True
                # if it already ran today but before the scheduled time, run again
if nowdate.tm_year == olddate.tm_year and nowdate.tm_mon == olddate.tm_mon and nowdate.tm_mday == olddate.tm_mday:
if (olddate.tm_hour < hour or (olddate.tm_hour == hour and olddate.tm_min < minute)):
return True
except Exception as e:
LOG.error("Action schedule parsing error. ACTIONID=" + str(self.actionid) +
", PLUGINID=" + str(self.pluginid) +
", INTERVALTYPE=" + str(intervaltype) +
', ERROR=' + str(e))
return False
return False
def work(self):
''''''
if self.isrun == True:
# already run
return
        # check the periodic execution conditions
if self.inschedule() == False:
return
self.isrun = True
plugin = self.module_class()
plugin.pluginconfig = self.pluginconf
plugin.run()
"""
action 수행 결과
"""
plugin.profile_result[GN_PROFILE_RESULT_PFID] = self.actionid
plugin.profile_result[GN_PROFILE_RESULT_PFGROUPID] = ""
plugin.profile_result[GN_PROFILE_RESULT_PFGROUPOP] = 0
plugin.profile_result[GN_PROFILE_RESULT_ACTIVE] = 0
keypath = 'keypath.' + str(self.actionid) + '.profile.result'
storehash = dataservice.get_data(keypath)
resulthash = hashlib.sha256(str(plugin.profile_result)).hexdigest()
issend = (storehash != resulthash) or (plugin.always_profileresult_send)
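        # dedup: only send when the SHA-256 of the result differs from the
        # stored hash (or when the plugin forces sending every time)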
if issend == 1:
''''''
data = dataservice.get_data(KEYSTR_ACTION_RESULT)
if len(data) == 0:
data = '[]'
buf = json.loads(data)
buf.append(plugin.profile_result)
dataservice.set_data(KEYSTR_ACTION_RESULT, json.dumps(buf))
dataservice.set_data(keypath, resulthash)
        # store the plugin run time
keypath = 'keypath.' + str(self.actionid) + '.profile.rundatetime'
dataservice.set_data(keypath, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
LOG.debug("Action result. ACTIONID=" + str(self.actionid) +
", PLUGIN_NAME=" + self.pluginname +
", PLUGINID=" + str(self.pluginid) +
", ISSEND=" + str(issend) +
', HASH=' + resulthash)
keypath = 'keypath.' + str(self.actionid) + '.updateinfo.result'
storehash = dataservice.get_data(keypath)
resulthash = hashlib.sha256(str(plugin.list_data)).hexdigest()
issend = (storehash != resulthash) or (plugin.always_profileresult_send)
        if issend:
            # queue the plugin list data for sending
data = dataservice.get_data(KEYSTR_PLUGIN_RESULT)
if len(data) == 0:
data = '[]'
buf = json.loads(data)
buf.append(plugin.list_data)
dataservice.set_data(KEYSTR_PLUGIN_RESULT, json.dumps(buf))
dataservice.set_data(keypath, resulthash)
LOG.debug("Update info. ACTIONID=" + str(self.actionid) +
", PLUGIN_NAME=" + self.pluginname +
", PLUGINID=" + str(self.pluginid) +
", ISSEND=" + str(issend) +
', HASH=' + resulthash)
self.isrun = False
class PluginManager(GnThread):
    '''Creates and manages one worker thread per plugin action.'''
plugininfo = {}
plugin_threads = []
def __init__(self, stop, timeout):
GnThread.__init__(self, 'pluginmgr', stop, timeout)
def add_plugin(self, pluginid, pluginname):
self.plugininfo[pluginid] = pluginname
def set_agentaction(self, agentaction, timeobject):
        # create one thread per plugin and keep it ready
for pfid, actions in agentaction.items():
pid = int(actions[AGENT_ACTION_PLUGININFO]['key'])
# plugin config
pluginconf = actions[AGENT_ACTION_PLUGINCONF]
            # run-interval option
intervaltype = actions[AGENT_ACTION_INTERVAL][PLUGIN_ACTION_INTERVALTYPE]
pluginname = self.plugininfo[pid]
try:
thplugin = PluginThread(pfid, pid, pluginname, pluginconf, self.stop)
thplugin.pluginstart(actions[AGENT_ACTION_INTERVAL], timeobject)
self.plugin_threads.append(thplugin)
except Exception as e:
LOG.error("Plugin not found. ACTIONID=" + str(pfid) +
", PLUGINID=" + str(pid) +
", PLUGINNM=" + str(pluginname) +
", ERROR='" + str(e) + "'")
def del_agentaction(self):
for t in self.plugin_threads:
LOG.info('Plugin finish. PLUGINID=' + str(t.pluginid) + ', PLUGINNM=' + str(t.pluginname))
t.stopwait()
t.join()
del self.plugin_threads[:]
def start(self):
''''''
for t in self.plugin_threads:
t.start()
def immediate(self):
for t in self.plugin_threads:
if t.isrun == True:
LOG.info("Plugin already run. PLUGINID=" + str(t.pluginid) + ", PLUGINNM=" + str(t.pluginname))
else:
LOG.info("Plugin immediate run. PLUGINID=" + str(t.pluginid) + ", PLUGINNM=" + str(t.pluginname))
t.immediate()
def work(self):
''''''
LOG.info('Plugin manage. COUNT=' + str(len(self.plugin_threads)))
def finish(self):
self.del_agentaction()
class GnUpdateRunner(GnThread):
    '''
    Runs periodically and executes a GnUpdate instance.
    interval : period in seconds between GnUpdate runs
    '''
def __init__(self, name, stop, interval):
self.interval = interval
self.jobs = jobs_q
self.results = results_q
GnThread.__init__(self, name, stop, self.interval)
def work(self):
while True:
try:
notifyId = self.jobs.get()
result = self.check_for_update(notifyId)
self.results.put(result)
finally:
self.jobs.task_done()
def check_for_update(self, notifyId):
vermgr = GnUpdate()
vermgr.get_server_ip_port()
if vermgr.search_local_ip() is None:
LOG.info("Local IP lookup failed.")
return False
vermgr.check_update_network()
vermgr.check_update_version()
vermgr.install_product_info()
        if notifyId == NOTIFYID_AGENTUPDATE:
vermgr.agent_update_for_action(False)
else:
vermgr.agent_update_for_action(True)
        if vermgr.nCompare == 1:
            vermgr.installing_updates()
            LOG.info("Installing updates successful.")
        else:
            LOG.info("Linux Agent will not [update].")
return True
class GnUpdate(object):
def __init__(self):
self.server_ip = 0
self.nCompare = 0
self.netinfo = gntools.netinfo()
self.path = AgentInstallPathInfo()
try:
self.user_info = gntools.get_account_info().split(':')
except Exception as e:
LOG.info("No account information was found. ERRMSG=" + str(e))
self.user_info = ['', ]
def check_install_agent(self):
"""
GnStart의 설치 경로 확인.
미 설치시에는 리눅스 에이전트를 재설치해야 한다.
GnStart가 서비스로 등록되었는지 확인함.
서비스로 미 등록시에 리눅스 에이전트를 재설치해야 한다.
"""
if gntools.search('/etc/init.d', 'gnstart'):
LOG.info("Registered to GnStart service successfully.")
else:
LOG.info("Failed to register with GnStart service.")
return False
if gntools.installed_agent_path(self.path.get_agent_path(), self.path.p["AGENT_DIR_NAME"]):
LOG.info("GnStart is installed.")
else:
LOG.info("GnStart is not installed.")
return False
return True
def restart_agent(self):
pass
def check_run_agent(self):
"""
GnStart 프로세스의 실행 여부를 확인한다.
"""
if len(self.user_info) == 0:
self.user_info = ['', ]
proc_lists = get_proc_list()
user_proc_list = [x for x in proc_lists if x.user == self.user_info[0]]
if user_proc_list:
LOG.info("GnStart service started successfully.")
return True
else:
LOG.info("Failed to start GnStart service.")
def search_local_ip(self):
for el in self.netinfo:
if el['gateway']:
LOG.info("Local IP = " + str(el['ip']))
return el['ip']
LOG.info("Failed to retrieve local IP.")
def get_server_ip_port(self):
"""
정책서버 IP를 문자열로 저장 함.
Agent/soapserver 파일에서 읽어옴.
저장된 문자열 형식 : 172.29.99.251
"""
try:
fp = open(self.path.get_policy_server_address_file_path(), 'r')
line = fp.readline()
fp.close()
self.server_ip = line.replace("\n", "", 1)
return 1
except Exception as e:
LOG.error("Policy Server IP lookup failed. ERRMSG=" + str(e))
return 0
def check_update_network(self):
"""
업데이트 가능한 네트워크 그룹인지 체크한다.
0 - 업데이트 가능한 네트워크그룹이 전체임.
1 - 업데이트 가능한 특정 네트워크 그룹이 있음.
"""
agentprofile()
noupdate = dataservice.get_data(KEYSTR_NODE_NOUPDATE)
return noupdate
def install_product_info(self):
params = [
node.ip_addr,
]
try:
soap.call(ID_SOAP_CUSTOMINFO, params)
LOG.info('You have received agent custom information.')
except Exception as e:
            LOG.info('Failed to receive custom information. ERR=%s', str(e))
raise
custominfo = soap.dict_env.get("custominfo")
for el in custominfo:
if "productname" in el:
dataservice.set_data("custominfo.productname", el[1])
if "productcopyright" in el:
dataservice.set_data("custominfo.productcopyright", el[1])
def check_update_version(self):
"""
installinfo를 통해서 서버에 있는 에이전트의 최신 버전 정보를 가져온다.
"""
params = [
node.nodeid,
node.ip_addr,
"",
""
]
try:
soap.call(ID_SOAP_INSTALLINFO, params)
LOG.info('You have received agent installation information.')
except Exception as e:
LOG.info('Failed to receive installation information. ERR=%s', str(e))
raise
installinfo = soap.dict_env.get("installinfo")
if installinfo.get('version'):
installinfo_ver = (installinfo.get('version')).split('-')
dataservice.set_data("installinfo.version", installinfo_ver[0])
else:
LOG.info("Agent installation information does not exist.")
def agent_update_for_action(self, action):
node_ver = dataservice.get_data("node.version")
if len(node_ver) == 0:
return
v = map(int, node_ver.split('.'))
installinfo_ver = dataservice.get_data("installinfo.version")
if installinfo_ver is None:
return
if len(installinfo_ver) == 0:
return
i = map(int, installinfo_ver.split('.'))
if action:
            '''
            Upgrade when the current version (node_ver) is lower than
            the version on the server (installinfo_ver).
            '''
LOG.info('CHECK FOR [UPGRADE].')
if (v[0]) < (i[0]):
self.nCompare = 1
return
if (v[1]) < (i[1]):
self.nCompare = 1
return
if (v[2]) < (i[2]):
self.nCompare = 1
return
if (v[3]) < (i[3]):
self.nCompare = 1
return
            LOG.info('Agent version is not newer. [INSTALL=%s <= NODE=%s]' % (installinfo_ver, node_ver))
else:
            '''
            Update when the two versions differ.
            '''
LOG.info('CHECK FOR [UPDATE].')
if (v[0]) != (i[0]):
self.nCompare = 1
return
if (v[1]) != (i[1]):
self.nCompare = 1
return
if (v[2]) != (i[2]):
self.nCompare = 1
return
if (v[3]) != (i[3]):
self.nCompare = 1
return
            LOG.info('Agent version is the same. [INSTALL=%s, NODE=%s]' % (installinfo_ver, node_ver))
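        # Worked example: node 4.0.12.1 vs server 4.0.13.0 -> the upgrade check sets
        # nCompare=1 at the third component (12 < 13); the update check would also
        # set nCompare=1, because the components differ.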
def installing_updates(self):
"""
기 능 : 에이전트 설치 또는 업데이트
"""
dict_installinfo = soap.dict_env.get("installinfo")
file_name_list = (dict_installinfo.get('downloadurl')).split("/")
"""
rm -rf lnxagent.tar
"""
command = ''.join(["rm -rf " + (file_name_list[5])])
result = Shell.execute(command)
"""
wget https://172.29.99.93:443/Agent/GnAgent/lnxagent.sh --no-check-certificate
"""
command = ''.join(["wget " + (dict_installinfo.get('downloadurl')) + " --no-check-certificate"])
result = Shell.execute(command)
file_size = (dict_installinfo.get('downloadsize'))
file_hash = ((dict_installinfo.get('checkpath')).split('|'))[1]
command = ''.join(["sha256sum " + (file_name_list[5])])
result = Shell.execute(command)
downloaded_file_hash = result[1].split(" ")
if downloaded_file_hash[0] != file_hash:
LOG.info("do not match hash %s, %s" % (downloaded_file_hash, file_hash))
return
statinfo = os.stat(file_name_list[5])
if file_size != str(statinfo.st_size):
LOG.info("error size not match %s, %s" % (file_size, statinfo.st_size))
return
command = ''.join(['whoami'])
result = Shell.execute(command)
username = result[1].replace("\n", "", 1)
command = ''.join(['chown ' + username + ':' + username + ' ' + file_name_list[5]])
result = Shell.execute(command)
command = ''.join(["chmod 755 " + file_name_list[5]])
LOG.info("%s" % (command))
result = Shell.execute(command)
LOG.info("%s" % (result[1]))
file_name = os.path.splitext(file_name_list[5])
src_file = ''.join([self.path.get_agent_path() + '/' + file_name_list[5]])
dst_file = ''.join(["/tmp" + '/' + file_name[0] + '_' + self.server_ip + file_name[1]])
shutil.copy(src_file, dst_file)
"""
Agent/lnxagent.sh 172.29.99.93 &
Shell.execute(command) 대신 os.system(command) 명령으로 대체함.
이 유: lnxagent.sh 172.29.99.93 & 명령어를 실행 할 때 알 수없는 오류발생으로 스크립트가 중단됨.
"""
command = ''.join(["sudo " + dst_file + " &"])
LOG.info("command= " + str(command))
        result = os.system(command)  # os.system returns the exit status, not output
        LOG.info("RESULT CODE= " + str(result))
sys.exit()
class GnUpdateKickingThread(GnThread):
"""
Genian Agent의 자동 업그레이드 여부를 주기적으로 체크함.
"""
def __init__(self, name, stop, interval):
''''''
self.jobs = jobs_q
self.results = results_q
GnThread.__init__(self, name, stop, interval)
def work(self):
        '''
        Periodically check for updates from the agent.
        Period: 6 hours.
        '''
add_jobs(self.jobs, LOCAL_NOTIFYID_AGENTUPDATE)
process(self.jobs, self.results)
class GnNotifyIFChangeThread(GnThread):
''''''
def __init__(self, name, stop, interval, queue):
''''''
self.jobs = queue
GnThread.__init__(self, name, stop, interval)
def work(self):
"""
네트워크 인터페이스 상태가 변경되면 NetLink Event를 수신하는 쓰레드 함수.
체크 대상 : 장비에 있는 모든 네트워크 인터페이스 (ethx, lo, wlanx, vlanx, etc)
"""
net_if_change_check(self.jobs)
def add_jobs(jobs_q, notifyId):
    '''
    Add a task to the input queue of the GnUpdate thread.
    :param notifyId:
    :return:
    '''
jobs_q.put(notifyId)
return
def output(results_q):
    '''
    Drain and count the task results in the GnUpdate results queue.
    :param results:
    :return:
    '''
done = 0
while not results_q.empty():
result = results_q.get_nowait()
done += 1
return done
def process(jobs_q, results_q):
    '''
    Wait for the tasks in the GnUpdate input queue to be processed.
    :return:
    '''
canceled = False
try:
jobs_q.join()
except Exception as e:
canceled = True
LOG.error("Queue Exception. ERRMSG=%s" % (str(e)))
if results_q is not None:
if canceled:
done = results_q.qsize()
else:
done = output(results_q)
return
def process_noti(notify_q):
    '''
    Wait for the tasks in the GnNotify input queue to be processed.
    :return:
    '''
try:
notify_q.join()
except Exception as e:
LOG.error("Queue Exception. ERRMSG=%s" % (str(e)))
return
def verify_agent():
"""
verify_agent
agent 무결성 검증
Verify.enc 파일의 작성된 파일목록의 hash값을 비교하여 변경여부를 확인한다.
"""
p = AgentInstallPathInfo()
try:
file_infos = version.get_plugin_version_list_from_file()
for el in file_infos:
el.file_path = el.file_path.replace("%TARGET%/", p.get_agent_path() + "/")
if "gnagent.py" in el.file_path:
dataservice.set_data("node.version", el.file_version)
try:
if os.path.isfile(el.file_path):
cf = open(el.file_path, 'r')
cf.close()
else:
continue
except Exception as e:
LOG.error("File not found. FILE=" + el.file_path + ", ERRMSG=" + str(e))
agentexit()
# file hash check
command = ''.join(["openssl dgst -sha256 ", el.file_path, " | awk '{print $2}'"])
shell_result = Shell.execute(command)
hashvalue = shell_result[1].replace('\n', '').upper()
filehash = el.file_hash.upper()
if filehash != hashvalue:
LOG.error("File hash mismatch. FILE=" + el.file_path + " ORGHASH=" + filehash + " DSTHASH=" + hashvalue)
agentexit()
LOG.info("Agent Verify Success.")
return 1
except Exception as e:
LOG.error("Verify file not found. FILE=" + PATH_VERIFY + " " + str(e))
return 0
def servercheck():
    '''
    Check the policy server IP.
    '''
try:
path = AgentInstallPathInfo()
fp = open(path.get_policy_server_address_file_path(), 'r')
line = fp.readline()
fp.close()
global SERVER_IP, SERVER_ADDR
SERVER_IP = line.replace("\n", "", 1)
SERVER_ADDR = struct.unpack('>L', socket.inet_aton(SERVER_IP))[0]
return 1
except Exception as e:
LOG.error("soapserver isn't found. " + str(e))
return 0
def signalhandler(signum, f):
''''''
LOG.fatal('signal received. SIG=' + str(signum))
stop_event.set()
def agentlogon():
LOG.info("agentlogon CURRENT DIR= " + str(os.getcwd()))
LOG.info("agentlogon CURRENT USER= " + str(getpass.getuser()))
params = [
"",
node.mac_addr,
"",
"",
node.hostname,
node.platform,
node.osid,
node.lang,
node.ip_addr
]
try:
soap.call(ID_SOAP_LOGON, params)
node.nodeid = soap.dict_env.get("nodeid")
# db store nodeid
dataservice.set_data(KEYSTR_NODEID, node.nodeid)
LOG.info('agent logon success. node.nodeid=' + str(node.nodeid))
except Exception as msg:
LOG.info('agent logon failed. ERRMSG=' + str(msg))
raise
def agentprofile():
''''''
params = [
node.nodeid,
"20",
""
]
try:
soap.call(ID_SOAP_GETMYPROFILE, params)
LOG.info('agent profile received success.')
"""
정책 서버에 접속한 결과를 저장하는 클래스 필요.
bool CGnJoin::CenterJoin(bool bRejoin) - Agent/GnAgent/GnJoin.cpp 함수 참조.
"""
except Exception as msg:
LOG.info('agent profile received failed.' + str(msg))
raise
def agentexit():
'''
agentexit
'''
LOG.info("Agent Stop.")
sys.exit()
def main(args):
'''
main
'''
LOG.info("Agent Start.")
# signal
signal.signal(signal.SIGINT, signalhandler)
global dataservice
dataservice = Dataservice()
global threadDelegate
threadDelegate = GnThreadDelegate()
global stop_event
stop_event = threading.Event()
if verify_agent() == 0:
agentexit()
ret = gntools.get_soap_server_ip_port()
    if ret is None or len(ret) == 0:
LOG.info('Serverip not found. GnStart exit')
sys.exit()
global node
node = Node()
node.node_info_setup()
node.create_node_info()
global tool
tool = Tool()
if tool.create_tool() == False:
agentexit()
global soap
soap = Soap(tool)
agentlogon()
while node.nodeid is None:
node.node_info_setup()
time.sleep(5)
agentprofile()
global jobs_q
jobs_q = Queue.Queue()
global results_q
results_q = Queue.Queue()
global schedule_q
schedule_q = Queue.Queue()
# thread start
thnamepipe = Namedpipe(NAMEDPIPE_AGENT, stop_event, 0, notify_q)
thnamepipe.start()
thsender = PluginResultsendThread(stop_event, 10)
thsender.start()
thplugin = PluginManager(stop_event, 600)
thplugin.add_plugin(7001, "osinfo")
thplugin.add_plugin(7002, "hwinfo")
thplugin.add_plugin(7003, "swinfo")
thplugin.add_plugin(7004, "netinfo")
thplugin.add_plugin(7005, "scriptctrl")
thplugin.set_agentaction(soap.dict_env['aalist'], soap.dict_env['tolist'])
thplugin.start()
notifymgr = GnNotify("GnNotify Mgr", stop_event, 1, schedule_q)
notifymgr.setup_network_info()
notifymgr.start()
versionmgr = GnUpdateRunner("gnupdate runnger", stop_event, 10)
versionmgr.start()
thgnupdatekicking = GnUpdateKickingThread("gnupdate-kicking", stop_event, 21600)
thgnupdatekicking.start()
thschedule = GnScheduleThread("gnSchedule", schedule_q, notify_q)
thschedule.setDaemon(True)
thschedule.start()
"""
윈도우 에이전트의 NetIfChange() 쓰레드의 실행주기에 따라서 기존 1초 -> 5초로 변경함.
"""
thnotifyifchange = GnNotifyIFChangeThread("threadNetIf", stop_event, 5, notify_q)
thnotifyifchange.start()
threads['namedpipe'] = thnamepipe
threads['resultsender'] = thsender
threads['pluginmgr'] = thplugin
threads['versionmgr'] = versionmgr
threads['updatekicking'] = thgnupdatekicking
threads['gnNotifyMgr'] = notifymgr
threads['gnSchedule'] = thschedule
threads['gnnotifyifchange'] = thnotifyifchange
"""
사용자 인증 모듈을 사용하지 않음.(GUI, Console를 통한 인증 기능 구현 필요)
현재 구현 상태는 Web browser로만 가능함.
"""
authstatus = "0"
"""
authstatus == "1" : 사용자 인증이 되지 않은 상태.
authstatus == "0" : 사용자 인증이 완료된 상태.
"""
if authstatus != "0" or len(authstatus) == 0:
send_notify = (44, '0x400')
add_jobs(notify_q, send_notify)
signal.pause()
    # wait until every thread has finished
for key in threads.keys():
t = threads[key]
t.stopwait()
t.join()
dataservice.close()
logging.shutdown()
agentexit()
if __name__ == '__main__':
LOG = set_logging_parameter(sys.argv)
if '-d' in sys.argv:
path = AgentInstallPathInfo()
path.set_devopt("unittest")
main(sys.argv)
| UTF-8 | Python | false | false | 73,093 | py | 217 | gnagent.py | 179 | 0.522476 | 0.514929 | 0 | 2,307 | 29.036411 | 214 |
gabriellaec/desoft-analise-exercicios | 1,640,677,513,512 | d0b5570d10812cbdf9c408a00856efde5f35c6d7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_177/ch20_2020_10_07_14_05_25_291698.py | 8fea6a9c7e340d16ed8c1c83ec5cf26f2014763b | []
| no_license | https://github.com/gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | distancia = float(input('Qual a distância da viagem em km?'))
if distancia <= 200:
passagem = 0.5 * distancia
print ('preço por quilometro = R$0.50')
else:
passagem = 100 + 0.45 * (distancia - 200)
print ('preço por quilometro = R$0.45')
print("Preço da passagem = R${0}".format(passagem)) | UTF-8 | Python | false | false | 309 | py | 35,359 | ch20_2020_10_07_14_05_25_291698.py | 35,352 | 0.64918 | 0.580328 | 0 | 8 | 37.25 | 61 |
claudeisakeeb/LoLBot | 2,199,023,287,774 | 668a53e18571c288946ee18ba2e8b561b643fb21 | 1c3381e143f511b22bcc1306b43998c815853c24 | /get_all_champion_skins.py | a0931a0231fa4a032bc834c0eadde54284f0e401 | []
| no_license | https://github.com/claudeisakeeb/LoLBot | cf981f43824a80f4fec769f385ccabf0a8a126b1 | b9972113ff7e6cefa55d83d874c2080fc6e0e4ae | refs/heads/master | 2022-12-07T20:13:01.772554 | 2020-08-22T21:39:38 | 2020-08-22T21:39:38 | 287,159,118 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
#Gets all champion skins in the current version of data dragon and puts them in 'all_champion_skins.json'
#The format is "key-ified skin name" --> [champion name, skin photo id, original skin name, skin ID]
champion_skin_dict = {}
version = requests.get("https://ddragon.leagueoflegends.com/api/versions.json").json()[0]
all_champions = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{version}/data/en_US/champion.json").json()["data"]
for champion in all_champions:
champion_skins = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{version}/data/en_US/champion/{champion}.json").json()["data"][champion]["skins"]
for skin in champion_skins:
key = champion.lower().replace(" ", "") if skin["name"] == "default" else skin["name"].lower().replace(" ", "").replace("'", "").replace("/", "")
champion_skin_dict[key] = [champion, skin["num"], f"Classic {champion}" if skin["name"] == "default" else skin["name"], skin["id"]]
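# Example of the key-ification above (illustrative): the skin name "K/DA Kai'Sa"
# becomes the key "kdakaisa" (lowercased; spaces, apostrophes and slashes stripped).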
with open("all_champion_skins.json", "w") as f:
json.dump(champion_skin_dict, f)
print("Successfully generated all champion skins.") | UTF-8 | Python | false | false | 1,137 | py | 16 | get_all_champion_skins.py | 7 | 0.683377 | 0.682498 | 0 | 19 | 58.894737 | 156 |
AInitikesh/brain-waves | 16,217,796,542,770 | 6a67c131d1cff1eb7ea7b2f5061b9136bb7eca5e | a0765ea5dc34a12839d728d837e2e0668623a4f7 | /app/__init__.py | d918d760a219bc04c5f616e8486c2f1938bba32a | []
| no_license | https://github.com/AInitikesh/brain-waves | 3a82d71dc331de46bd206fe82f38cd31ea6c496c | 286cb48512dc86406a0e86051b3bf86741492aee | refs/heads/master | 2020-04-29T07:43:50.506553 | 2019-03-16T22:43:53 | 2019-03-16T22:43:53 | 175,963,247 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Import flask and template operators
from flask import Flask, render_template, jsonify
from flask import request
from app.helper.converter import insertDb
import os
from flask_cors import CORS
import datetime
# Import SQLAlchemy
from flask_sqlalchemy import SQLAlchemy
# Define the WSGI application object
app = Flask(__name__, static_url_path='/static')
cors = CORS(app)
# Configurations
app.config.from_object('config')
db = SQLAlchemy(app)
files = os.listdir('CS')
client = app.config["CLIENT"]
otherParty = app.config["OTHER_PARTY"]
final_data = insertDb(client, otherParty)
@app.route('/')
def main():
return render_template('index.html')
@app.route('/merge')
def merge():
id = request.args.get('id')
for data in final_data:
if data[4] == int(id):
data[3] = "MERGED"
return "success"
@app.route("/results")
def index():
    ## get the last date the webscraper was run
size = request.args.get('size')
page = request.args.get('page')
tradeDateFrom = request.args.get('tradeFrom')
tradeDateTo = request.args.get('tradeTo')
setllDateFrom = request.args.get('settlFrom')
setllDateTo = request.args.get('settlTo')
currency = request.args.get('currency')
rate = request.args.get('rate')
status = request.args.get('status')
return_data = final_data
if status != None and status != "":
return_data=[data for data in final_data if data[3] == status or data[3] == "MERGED"]
if tradeDateFrom != None and tradeDateFrom != "":
dateSplit = tradeDateFrom.split('-')
d1 = datetime.datetime(int(dateSplit[0]), int(dateSplit[1]), int(dateSplit[2]))
temp_return = []
for data in return_data:
dataDate = data[0]['30T'].split('/')
if len(dataDate) == 3:
d2 = datetime.datetime(int(dataDate[2]), int(dataDate[1]), int(dataDate[0]))
if d1 < d2:
temp_return.append(data)
return_data = temp_return
if tradeDateTo != None and tradeDateTo != "":
dateSplit = tradeDateTo.split('-')
d1 = datetime.datetime(int(dateSplit[0]), int(dateSplit[1]), int(dateSplit[2]))
temp_return = []
for data in return_data:
dataDate = data[0]['30T'].split('/')
if len(dataDate) == 3:
d2 = datetime.datetime(int(dataDate[2]), int(dataDate[1]), int(dataDate[0]))
if d2 < d1:
temp_return.append(data)
return_data = temp_return
if setllDateFrom != None and setllDateFrom != "":
dateSplit = setllDateFrom.split('-')
d1 = datetime.datetime(int(dateSplit[0]), int(dateSplit[1]), int(dateSplit[2]))
temp_return = []
for data in return_data:
dataDate = data[0]['30V'].split('/')
if len(dataDate) == 3:
d2 = datetime.datetime(int(dataDate[2]), int(dataDate[1]), int(dataDate[0]))
if d1 < d2:
temp_return.append(data)
return_data = temp_return
if setllDateTo != None and setllDateTo != "":
dateSplit = setllDateTo.split('-')
d1 = datetime.datetime(int(dateSplit[0]), int(dateSplit[1]), int(dateSplit[2]))
temp_return = []
for data in return_data:
dataDate = data[0]['30V'].split('/')
if len(dataDate) == 3:
d2 = datetime.datetime(int(dataDate[2]), int(dataDate[1]), int(dataDate[0]))
if d2 < d1:
temp_return.append(data)
return_data = temp_return
if rate != None and rate != "":
        return_data=[data for data in return_data if data[0]['36'] == rate or data[1]['36'] == rate]
if currency != None and currency != "":
return_data=[data for data in return_data if data[0]['32B'][1] == currency or data[0]['33B'][1] == currency or data[1]['32B'][1] == currency or data[1]['33B'][1] == currency]
if size == None:
size = 10
if page == None:
page = 0
size = int(size)
page = int(page)
return jsonify(return_data[page*size:page*size + size])
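# Example request against the /results endpoint above (illustrative values):
#   GET /results?size=10&page=0&currency=USD&rate=1.25&status=UNMATCHED
# All filters narrow the data first; pagination (page/size) is applied last.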
| UTF-8 | Python | false | false | 4,148 | py | 6 | __init__.py | 4 | 0.585824 | 0.565333 | 0 | 119 | 33.831933 | 182 |
prmelehan/AITesting | 558,345,783,962 | e28b2ea932f89bed97473e2fe299ee4017f151f3 | 3a9ac8452e0f3b97b39f413f7da55c6d25e69354 | /LSTM/finance/stocks/stock_predictor.py | bbeb0e574bf5bd879579c43df89eee570ad7e43e | []
| no_license | https://github.com/prmelehan/AITesting | b0d685b6b0e2157b361026efe46183040eda5f75 | bc59114b235c1771b34e6500f5c8bb95a1698f01 | refs/heads/master | 2021-01-19T13:22:47.462452 | 2018-01-29T18:01:11 | 2018-01-29T18:01:11 | 100,838,854 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | # this will be able to take any datasheet from NASDAQ and try to predict the stock price of that company
# dependencies
from __future__ import print_function
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.layers import Bidirectional
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping, Callback
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import math
import requests
import pandas_datareader as web
import datetime
# HYPER PARAMETERS
look_back = 7
#epochs = 1000
epochs = 5
batch_size = 32
# helper for logging the history
class AccuracyHistory(Callback):
def on_train_begin(self, logs={}):
self.acc = []
def on_epoch_end(self, batch, logs={}):
self.acc.append(logs.get('acc'))
def get_data(stock_symbol):
start = datetime.datetime(2008, 1, 1)
end = datetime.datetime.now()
print("Downloading historical quotes for " + str(stock_symbol) + "...")
data = web.DataReader(stock_symbol, 'yahoo', start, end)
print("Done")
    # save this data just in case
data.to_csv('data/' + str(stock_symbol) + ".csv")
closing = data.Close.values.astype('float32')
opening = data.Open.values.astype('float32')
high = data.High.values.astype('float32')
low = data.Low.values.astype('float32')
# reshape to column vector
closing = closing.reshape(len(closing), 1)
opening = opening.reshape(len(opening), 1)
high = high.reshape(len(high), 1)
low = low.reshape(len(low), 1)
prices = np.zeros((len(data), 4))
print("Prices Shape:", prices.shape)
# reassign the values to the tensor
for index, val in enumerate(prices):
prices[index, 0] = closing[index]
prices[index, 1] = opening[index]
prices[index, 2] = high[index]
prices[index, 3] = low[index]
return prices
def training_split(data, split):
trainingSize = int(len(data) * float(split))
    testingSize = len(data) - trainingSize
trainingSet, testingSet = data[0:trainingSize, :], data[trainingSize:len(data), :]
return trainingSet, testingSet
# convert an array of values into a time series dataset
# in form
# X Y
# t-look_back+1, t-look_back+2, ..., t t+1
def create_dataset(dataset, look_back):
dataX, dataY = [], []
for index in range(len(dataset)-look_back-1):
a = dataset[index:(index+look_back), :]
dataX.append(a)
dataY.append(dataset[index + look_back, :])
return np.array(dataX), np.array(dataY)
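# Worked example for create_dataset with look_back=2 and rows r0..r4:
#   X[0] = [r0, r1] -> Y[0] = r2,   X[1] = [r1, r2] -> Y[1] = r3
# i.e. each sample is a sliding window of look_back rows predicting the next row.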
def train_reshape(trainX, testX):
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], trainX.shape[2]))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], testX.shape[2]))
return trainX, testX
def create_model(look_back):
model = Sequential()
# add the first Bidirectional LSTM layer
model.add(Bidirectional(LSTM(32, return_sequences=True), input_shape=(look_back, 4)))
model.add(Dropout(0.2))
# add the second LSTM block
model.add(Bidirectional(LSTM(64, return_sequences=True)))
model.add(Dropout(0.2))
# add the final LSTM block
model.add(Bidirectional(LSTM(32, return_sequences=False)))
# add the dense layer to output the number from our feature representations
model.add(Dense(units=4))
# compile the model with mse as the loss function and adamoptimizer as the optimizer
model.compile(loss='mse', optimizer='adam')
return model
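# The final Dense(units=4) maps the BiLSTM features back to the four price
# channels [close, open, high, low], matching the column order built in get_data().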
def plot(prices, predictions, stock_symbol, prediction_title):
plt.plot(prices, label="Actual")
plt.plot(predictions, label=prediction_title)
plt.title(str(stock_symbol) + " Actual Price vs. Predicted Price")
plt.ylabel("Price (USD)")
plt.xlabel("Time")
plt.legend()
plt.show()
def main():
symbol = str(input("Stock Symbol (e.g AAPL): "))
prices = get_data(symbol)
# normalize our data
normalizer = MinMaxScaler(feature_range=(0,1))
prices = normalizer.fit_transform(prices)
training, testing = training_split(prices, 0.60)
trainX, trainY = create_dataset(training, look_back)
testX, testY = create_dataset(testing, look_back)
trainX, testX = train_reshape(trainX, testX)
model = create_model(look_back)
    checkpoint = ModelCheckpoint("model_checkpoints/" + symbol + "_checkpoint.h5", monitor='loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)  # monitor 'loss': fit() below has no validation data, so 'val_loss' would never be available
# --- debugging ------
history = AccuracyHistory()
# --------------------
# train the model
callbacks = [checkpoint, history]
model.fit(trainX, trainY, epochs=epochs, batch_size=batch_size, callbacks=callbacks)
# save the model to use later on
model.save('saved_models/' + symbol + ".h5")
trainPredictions = model.predict(trainX)
testPredictions = model.predict(testX)
trainPredictions = normalizer.inverse_transform(trainPredictions)
trainY = normalizer.inverse_transform(trainY)
testPredictions = normalizer.inverse_transform(testPredictions)
testY = normalizer.inverse_transform(testY)
trainingScore = math.sqrt(mean_squared_error(trainY[:, 0], trainPredictions[:, 0]))
testingScore = math.sqrt(mean_squared_error(testY[:, 0], testPredictions[:, 0]))
print("Training Score: %.5f RMSE" % (trainingScore))
print("Testing Score: %.5f RMSE" % (testingScore))
# unnormalize our prices for plotting
prices = normalizer.inverse_transform(prices)
trainingPlot = np.empty_like(prices)
trainingPlot[:, :] = np.nan
trainingPlot[look_back:len(trainPredictions)+look_back, :] = trainPredictions
testingPlot = np.empty_like(prices)
testingPlot[:, :] = np.nan
testingPlot[len(trainPredictions)+(look_back*2)+1:len(prices)-1, :] = testPredictions
plot(prices[:, 0], trainingPlot[:, 0], symbol, "Training Prediction")
plot(prices[:, 0], testingPlot[:, 0], symbol, "Testing Prediction")
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 6,289 | py | 13 | stock_predictor.py | 1 | 0.681507 | 0.669423 | 0 | 183 | 33.36612 | 182 |
liu111xiao111/UItest | 13,469,017,483,568 | 618fc32467d41e7a299bb24e88055074aa8e0df0 | b4afb44b8f483c048716fe12d778186ce68ac846 | /pages/ios/ffan/square_shopping_category_page.py | cba6db073d4631c64b78a0d126e1bcf53a18b027 | []
| no_license | https://github.com/liu111xiao111/UItest | 64309b2c85f6d2334d64bb0875ba9ced459ebb1e | 67e2acc9a99da81022e286e8d8ec7ccb12636ff3 | refs/heads/master | 2021-09-01T18:30:28.044296 | 2017-12-28T04:36:46 | 2017-12-28T04:36:46 | 115,585,226 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from api.api import API
from pages.ios.common.superPage import SuperPage
from pages.ios.ffan.square_shopping_category_page_configs import SquareShoppingPageConfigs
class SquareShoppingPage(SuperPage):
'''
作者 宋波
首页=>广场=>爱购物
'''
def __init__(self, testcase, driver, logger):
super(SquareShoppingPage, self).__init__(testcase=testcase, driver=driver, logger=logger);
'''
    usage : enter the Square module and check that it has loaded
'''
def validSelf(self):
API().assertElementByName(self.testcase, self.driver, self.logger,
SquareShoppingPageConfigs.name_commodity_title_st,
SquareShoppingPageConfigs.click_on_button_timeout)
def clickOnSubCommodity(self):
'''
usage: click on the sub-commodity button.
'''
tempText = API().getTextByXpath(self.testcase, self.driver, self.logger,
SquareShoppingPageConfigs.xpath_sub_commodity_st,
SquareShoppingPageConfigs.get_timeout)
API().clickElementByXpath(self.testcase, self.driver, self.logger,
SquareShoppingPageConfigs.xpath_sub_commodity_st,
SquareShoppingPageConfigs.click_on_button_timeout)
return tempText
if __name__ == '__main__':
pass;
| UTF-8 | Python | false | false | 1,460 | py | 656 | square_shopping_category_page.py | 648 | 0.597301 | 0.596591 | 0 | 41 | 33.341463 | 98 |
0hexit/zds-site | 15,255,723,884,208 | b743e83d4ecf4a00982ab07fa1e6523ba36b509b | 58042e54f78a767a81db52378c50622e043d6a07 | /zds/member/models.py | 04765857286f52a61da14f28fd0c45da9b7bc91e | []
| no_license | https://github.com/0hexit/zds-site | b7a9235b68c2a2c8b4715b4c4fbf1d980f0dcc87 | bcc83b852c650e70553c8eba83193da10f6332ab | refs/heads/master | 2021-06-03T05:22:51.339807 | 2014-01-30T00:55:45 | 2014-01-30T00:55:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
from datetime import datetime
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from hashlib import md5
from zds.forum.models import Post, Topic
from zds.utils.models import Alert
from zds.tutorial.models import Tutorial
from zds.article.models import Article
from django.contrib.gis.geoip import GeoIP
import uuid
class Profile(models.Model):
    '''Represents a user profile'''
class Meta:
verbose_name = 'Profil'
verbose_name_plural = 'Profils'
permissions = (
("moderation", u"Modérer un membre"),
("show_ip", u"Afficher les IP d'un membre"),
)
user = models.ForeignKey(User, unique=True, verbose_name='Utilisateur')
last_ip_address = models.CharField('Adresse IP', max_length=15, blank=True, null=True)
site = models.CharField('Site internet', max_length=128, blank=True)
show_email = models.BooleanField('Afficher adresse mail publiquement',
default=True)
avatar_url = models.CharField(
'URL de l\'avatar', max_length=128, null=True, blank=True
)
biography = models.TextField('Biographie', blank=True)
karma = models.IntegerField('Karma', default=0)
sign = models.TextField('Signature', blank=True)
show_sign = models.BooleanField('Voir les signatures',
default=True)
hover_or_click = models.BooleanField('Survol ou click ?',
default=True)
can_read = models.BooleanField('Possibilité de lire', default=True)
end_ban_read = models.DateTimeField('Fin d\'interdiction de lecture', null=True, blank=True)
can_write = models.BooleanField('Possibilité d\'écrire', default=True)
end_ban_write = models.DateTimeField('Fin d\'interdiction d\'ecrire', null=True, blank=True)
def __unicode__(self):
        '''Textual form of a profile'''
return self.user.username
def get_absolute_url(self):
'''Absolute URL to the profile page'''
return '/membres/voir/{0}'.format(self.user.username)
def get_city(self):
        '''Return the physical address via geolocation.'''
g = GeoIP()
geo = g.city(self.last_ip_address)
return u'{0}, {1}'.format(str(geo['city']), str(geo['country_name']))
def get_avatar_url(self):
'''Avatar URL (using custom URL or Gravatar)'''
if self.avatar_url:
return self.avatar_url
else:
return 'https://secure.gravatar.com/avatar/{0}?d=identicon'.format(md5(self.user.email).hexdigest())
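    # Falls back to Gravatar when no custom avatar URL is set: the URL embeds the
    # MD5 hex digest of the raw email address (e.g. md5 of "user@example.com").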
def get_post_count(self):
'''Number of messages posted'''
return Post.objects.filter(author__pk=self.user.pk).count()
def get_topic_count(self):
'''Number of threads created'''
return Topic.objects.filter(author=self.user).count()
def get_tuto_count(self):
'''Number of tutos created'''
return Tutorial.objects.filter(authors__in=[self.user]).count()
def get_tutos(self):
'''Get all tutorials of the user'''
return Tutorial.objects.filter(authors__in = [self.user]).all()
def get_draft_tutos(self):
'''Tutorial in draft'''
return Tutorial.objects.filter(authors__in=[self.user], sha_public__isnull=True, sha_draft__isnull=False).all()
def get_public_tutos(self):
'''Tutorial in public'''
return Tutorial.objects.filter(authors__in=[self.user], sha_public__isnull=False).all()
def get_validate_tutos(self):
'''Tutorial in validation'''
return Tutorial.objects.filter(authors__in=[self.user], sha_validation__isnull=False).all()
def get_beta_tutos(self):
'''Tutorial in beta'''
return Tutorial.objects.filter(authors__in=[self.user], sha_beta__isnull=False).all()
def get_articles(self):
'''Get all articles of the user'''
return Article.objects.filter(authors__in=[self.user]).all()
def get_posts(self):
return Post.objects.filter(author=self.user).all()
def get_invisible_posts_count(self):
return Post.objects.filter(is_visible=False, author=self.user).count()
def get_alerts_posts_count(self):
return Alert.objects.filter(author=self.user).count()
def can_read_now(self):
if self.end_ban_read:
return self.can_read or (self.end_ban_read < datetime.now())
else:
return self.can_read
def can_write_now(self):
if self.user.is_active:
if self.end_ban_write:
return self.can_write or (self.end_ban_write < datetime.now())
else:
return self.can_write
else:
return False
class TokenForgotPassword(models.Model):
class Meta:
verbose_name = 'Token de mot de passe oublié'
verbose_name_plural = 'Tokens de mots de passe oubliés'
user = models.ForeignKey(User, verbose_name='Utilisateur')
token = models.CharField(max_length=100)
date_end = models.DateTimeField('Date de fin')
def get_absolute_url(self):
'''Absolute URL to the new password page'''
return reverse('zds.member.views.new_password')+'?token={0}'.format(self.token)
class TokenRegister(models.Model):
class Meta:
verbose_name = 'Token d\'inscription'
verbose_name_plural = 'Tokens d\'inscription'
user = models.ForeignKey(User, verbose_name='Utilisateur')
token = models.CharField(max_length=100)
date_end = models.DateTimeField('Date de fin')
def get_absolute_url(self):
'''Absolute URL to the active account page'''
return reverse('zds.member.views.active_account')+'?token={0}'.format(self.token)
def __unicode__(self):
        '''Textual form of a registration token'''
return u"{0} - {1}".format(self.user.username, self.date_end)
class Ban(models.Model):
class Meta:
verbose_name = 'Sanction'
verbose_name_plural = 'Sanctions'
user = models.ForeignKey(User, verbose_name='Sanctionné')
moderator = models.ForeignKey(User, verbose_name='Moderateur',
related_name='bans')
type = models.CharField('Type', max_length=15)
text = models.TextField('Explication de la sanction')
pubdate = models.DateTimeField('Date de publication', blank=True, null=True)
| UTF-8 | Python | false | false | 6,608 | py | 156 | models.py | 60 | 0.628844 | 0.624602 | 0 | 184 | 34.875 | 119 |
emingure/SchoolProjects | 8,710,193,691,144 | aedc1456901ba1a28efb5ff9fc674f8e6aa35f5c | 50cadd32262e5ac612eb8b45c05829c6522585fb | /CmpE 230 - Systems Programming/exercises/b.py | 5fafb85e7cdc62bca17a5f4b2b35119b16b68ceb | []
| no_license | https://github.com/emingure/SchoolProjects | 7301cfd0711f310f70880e163f98996403cd8dda | 8e3c182c16453647ca4269051e2d491b37f53dc9 | refs/heads/master | 2020-04-17T09:23:01.845511 | 2019-01-18T18:37:21 | 2019-01-18T18:37:21 | 166,452,379 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
import sys
n = int(sys.argv[1])
for i in range(2,n+1):
str = "1" * i
found = re.search(r"^(11+?)\1+$",str)
if not found:
print len(str),str
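# The regex ^(11+?)\1+$ matches a string of 1s whose length is composite: a match
# splits it into two or more repeats of a block of at least two 1s (e.g. length 6
# matches as (11)(11)(11)), so the lengths printed above are exactly the primes.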
| UTF-8 | Python | false | false | 157 | py | 55 | b.py | 31 | 0.585987 | 0.541401 | 0 | 10 | 14.7 | 38 |
twcrane/thekla | 17,025,250,379,315 | 32e2b8ca779635b23710910d33ee81278235ffb5 | dcc73447097e0fe627d1dc9d1005b2bc60b5c310 | /src/Document.py | cfb300bc25716c2fec67ec4861cdccbe66a585c0 | []
| no_license | https://github.com/twcrane/thekla | 746f76bd3dc5bca5a2dd4894f0f4129da1cd028e | e945d2548b3b3ced4393a2189269ca784e76b8bb | refs/heads/master | 2021-01-16T22:03:59.264554 | 2013-10-01T07:42:30 | 2013-10-01T07:42:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import codecs
import os
import scipy
def repr_with_tm(fname, tm):
"""
Read file /fname/ and map each word to the id of the corresponding topic
vector in /tm/ and save the result in a list. With the list and the topic
model you can construct a word_id_list of topic vectors representing the
document.
"""
#!Warning!: The resulting word_id_list is bound to the topic models vocabulary.
#Work with the ids only if you are sure you are working with the same
#tm.vocab
    word_id_list = []  # stores the tm ids of the words in the document; filled across all lines
    with codecs.open(fname, 'r', 'utf-8') as doc_file:
        for line in doc_file:
            if line.isspace():
                continue
            for word in line.split():
                #word = word.lower()
                if tm.word_id_dict.has_key(word):
                    word_id_list.append(tm.word_id_dict[word])
if not word_id_list:
logging.debug('Found no known word in file {fname}'.format(fname=fname))
return word_id_list
def _normalize_vector(vec):
"""
Normalize /vec/ (vector or matrix) to a range between 0 and 1.
"""
#We are dealing with log probabilities, therefore all values are negative
v_min = scipy.amin(vec)
v_max = scipy.amax(vec)
f = lambda x: (x-v_min)/(v_max-v_min)
return f(vec)
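# Worked example: for log probabilities [-10., -6., -2.] the min/max are -10 and -2,
# so _normalize_vector maps them to [0., 0.5, 1.].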
class Documents:
"""Represent Documents using topic models"""
def __init__(self, doc_source, tm, centroid_computation):
self.tm = tm
#load absolute file names of text files
fnames = []
if isinstance(doc_source, str):
doc_dir = os.path.abspath(doc_source) + os.sep
fnames = [doc_dir + doc for doc in os.listdir(doc_dir) if
doc.endswith('.txt')]
elif isinstance(doc_source, list):
for doc in doc_source:
base = os.path.basename(doc)
if base.endswith('.txt'):
doc_dir = os.path.abspath(doc)
fnames.append(doc_dir)
#represent documents with help of topics
self.docs = {}
for fname in fnames:
doc = Document(fname, tm, centroid_computation)
if doc.centroid is not None:
self.docs[fname] = doc
def as_centroid_matrix(self):
"""
Returns a matrix consisting of the document centroids and a list
holding the information which document's centroid is in which line of
the matrix.
"""
id_list = []
assert len(self.docs) > 0
doc_centroid_matrix = scipy.zeros((0,len(self.tm)))
for key, doc in self.docs.iteritems():
id_list.append(key)
doc_centroid_matrix = scipy.vstack(
(doc_centroid_matrix, doc.centroid))
return doc_centroid_matrix, id_list
def compute_cluster_centroid(self, doc_list, flavor='avg'):
"""
Iterates over the doc_list and returns the average of all document's
centroids.
"""
cluster_centroid = scipy.zeros((1,len(self.tm)))
for doc in doc_list:
            #add the document centroid to the running sum
try:
centroid = self.docs[doc].centroid
except KeyError as ke:
logging.error(ke)
logging.warning('centroid missing for document "{doc}"'.format(doc=doc))
continue
            if centroid is not None:
cluster_centroid += centroid
cluster_centroid /= len(doc_list)
return cluster_centroid
class Document:
"""Represent a Document"""
def __init__(self, fname, tm, centroid_flavor='avg'):
#define which way is used to compute the centroid of the document
flavors = {'avg' : self.compute_centroid_avg,
'exp' : self.compute_centroid_avg_experimental}
self.fname = fname
self.cluster_assignment = -1
compute_centroid = flavors[centroid_flavor]
word_id_list = repr_with_tm(fname, tm)
self.centroid = compute_centroid(tm, word_id_list)
del(word_id_list)
def __str__(self):
return 'Document "{name}"'.format(name=self.fname)
def set_cluster_assignment(self, k):
self.cluster_assignment = k
def compute_centroid_avg(self, tm, word_id_list):
"""
Computes the normalized average of the topic vectors of all words in
this document
"""
try:
#no centroid without a vector
assert len(word_id_list) > 0
except AssertionError as ae:
logging.error(ae)
logging.error('document "{name}" seems to be empty!'.format(name=self.fname))
return None
#create vector to sum words' topic vectors
centroid = scipy.zeros(len(tm.topics))
for w_id in word_id_list:
topic_vector = tm.get_topic_vector_for_id(w_id)
centroid += topic_vector
#compute the average of the topics
centroid = centroid / float(len(word_id_list))
#normalize the vector to a range between 0 and 1
centroid = _normalize_vector(centroid)
return centroid
def compute_centroid_avg_experimental(self, tm, word_id_list):
"""
Computes the average of the *normalized* topic vectors of all words in
this document
"""
try:
#no centroid without a vector
assert len(word_id_list) > 0
except AssertionError as ae:
logging.error(ae)
logging.error('document "{name}" seems to be empty!'.format(name=self.fname))
return None
#create vector to sum words' topic vectors
centroid = scipy.zeros(len(tm.topics))
#TODO if this is used regularly, the normalized topic vector could
#already be precomputed in tm
for w_id in word_id_list:
topic_vector = tm.get_topic_vector_for_id(w_id)
#normalize topic vector
topic_vector = _normalize_vector(topic_vector)
centroid += topic_vector
#compute the average of the topics
centroid = centroid / float(len(word_id_list))
return centroid
| UTF-8 | Python | false | false | 6,366 | py | 2,022 | Document.py | 6 | 0.587496 | 0.585768 | 0 | 181 | 34.171271 | 89 |
amitupadhyay6/My-Python-Program | 16,045,997,838,261 | b92b5ac85af134680642f6cdf426e53ea7cd66f5 | c3a08f9bfac42053e34d0972095b56851cb1b60a | /My Codes/Sum and Prod.py | f9772ca93beb09a46db5dabd8487a867da9a3497 | []
| no_license | https://github.com/amitupadhyay6/My-Python-Program | 588b90a783264d40bc19e8050ebe6607d2e23bb1 | feb2f2d49ab4f863382941a8eb8caf2f9a0f4dc2 | refs/heads/master | 2022-09-19T23:50:26.701405 | 2020-05-31T11:09:53 | 2020-05-31T11:09:53 | 262,348,377 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy
ar = numpy.array([ [1,2], [7,10]])
print(ar.sum())
print(numpy.sum(ar, axis=0))
print(numpy.sum(ar, axis=1))
print(numpy.sum(ar, axis=None))
print(numpy.sum(ar))
print("\n")
print(ar.prod())
print(numpy.prod(ar, axis=0))
print(numpy.prod(ar, axis=1))
print(numpy.prod(ar, axis=None))
print(numpy.prod(ar))
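# For ar = [[1, 2], [7, 10]]: the sums are 20 (total), [ 8 12] (axis=0), [ 3 17] (axis=1);
# the products are 140 (total), [ 7 20] (axis=0), [ 2 70] (axis=1).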
| UTF-8 | Python | false | false | 336 | py | 170 | Sum and Prod.py | 146 | 0.639881 | 0.613095 | 0 | 14 | 21.857143 | 34 |
oyjjforever/licensePlate | 12,670,153,545,718 | f8fa905bb5d29fc418588e14a4da877c14da1892 | ba1f5f2872f58f1c867ffc5277d092adcd34f72d | /license_locate.py | 58f780a3b291fce5ef5a791bafa4b8a24c400812 | []
| no_license | https://github.com/oyjjforever/licensePlate | 5fd3e514d321425aa42c0fc1b6319328b579718f | bdd6190e4e3bfc25ec0642d9840d753307128d9e | refs/heads/master | 2020-06-24T22:29:31.017596 | 2019-08-02T08:42:47 | 2019-08-02T08:42:47 | 199,110,014 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import copy
import datetime
import math
from collections import Counter
import cv2
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.inf)
# fix the minimum search window at 180*20 (window_w * window_h); fine positioning happens later
from PIL import Image
window_h = 20
window_w = 180
sobel_image = []
class Locate:
def column_diff(self,pic):
h, w = pic.shape[:2] # w = 640,h = 480
coldiff_img = np.zeros((h, w), dtype=np.uint8)
for row in range(0,h):
for column in range(0,w-1):
coldiff_img[row][column+1] = pic[row][column]
return coldiff_img
    # preprocess the image
def preprocess_image(self, file_name):
        # grayscale via the weighted-average method (with cv2)
image = cv2.imread(file_name)
gray_image = self.get_gray_image_by_weight_avg(image)
        # Sobel vertical-edge detection.
        # cv2.Sobel(img, ddepth, dx, dy): img is the source image to run edge
        # detection on; a 16-bit signed depth (cv2.CV_16S) is used rather than a
        # plain uint8 type so the derivative cannot overflow.
        # The third and fourth parameters are the derivative orders in X and Y
        # (dx, dy) -- differences, for an image; (1, 0) differentiates along X only,
        # i.e. it detects edges in the X direction.
        # The optional fifth parameter ksize sets the kernel size.
x = cv2.Sobel(gray_image, cv2.CV_16S, 1, 0)
        # After filtering, convert back to uint8 with convertScaleAbs(); otherwise the image cannot be displayed and only a gray window appears.
scale_abs_x = cv2.convertScaleAbs(x) # convert 转换 scale 缩放
sobel_image = scale_abs_x
y = cv2.Sobel(gray_image, cv2.CV_16S, 0, 1)
scale_abs_y = cv2.convertScaleAbs(y)
sobel_image_y = scale_abs_y
        # binarization and median-filter smoothing
diff = np.array((640, 480), np.uint8)
gray_image = cv2.resize(gray_image, (640, 480))
        sobel_image = cv2.resize(sobel_image, (640, 480))  # resize for later processing
        sobel_image_y = cv2.resize(sobel_image_y, (640, 480))  # resize for later processing
        diff = cv2.absdiff(self.column_diff(sobel_image), sobel_image)  # horizontal difference between the Sobel image and its column-shifted copy, suppressing the background ==> gray-jump map
#diff = cv2.absdiff(gray_image, sobel_image)
avg = self.get_pixel_avg(diff)
print("avg:" ,avg)
        # adaptive thresholding of the gray-jump map
        # cv::adaptiveThreshold(
        #     cv::InputArray src,    // input image
        #     double maxValue,       // value assigned above the threshold
        #     int adaptiveMethod,    // adaptive method: mean or Gaussian
        #     int thresholdType,     // thresholding type
        #     int blockSize,         // block size
        #     double C               // constant subtracted from the mean
        # );
binary = cv2.adaptiveThreshold(diff, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,
5,
30#math.floor(avg / 10)#40
)
# cv2.imshow("binary", binary)
# cv2.waitKey(0)
return sobel_image, sobel_image_y,diff, binary
def rough_locate(self, sobel_image, binary_image, diff):
h, w = binary_image.shape[:2] # w = 640,h = 480
        # from the binary image, build the gray-jump array
image_jump_array = self.get_2d_gray_jump(binary_image)
#print(image_jump_array)
        # build the integral image of the gray jumps, used to find the region with the most jump points
integral_array = self.get_gray_scale_jump_integral(h, w, image_jump_array)
#print(integral_array)
# img = np.array(integral_array)
# plt.imshow(img)
        # window-search the jump integral image to roughly locate the plate;
        # the rough candidate regions are then refined by the fine-positioning step
candiate_list = self.rough_search_by_window(h, w, integral_array, sobel_image,binary_image)
return integral_array, candiate_list
def detail_locate_and_confirm(self, candiate_list, sobel_image, sobel_image_y,integral_array):
# position : 粗定位的车牌左上角坐标
# 第一步扩展车牌的上下区域获得最佳上下边界
# 具体为获取扩展区域的水平投影并获取其平均值,从水平中央开始向两侧扫描,发现小于均值的则是上下边界
# 第二步扩展车牌的左右区域获得最佳左右边界
# 扩展后通过数据结构的最大子段和获取左右边界,因为在灰度图像中,白色为255,黑色为0
candiate_list_2 = [] # 用作第二轮筛选
for i in range(len(candiate_list)):
flag = True # 标志表示步骤正常运行 ,标志要放在循环内,以便每次刷新
position = candiate_list[i]
#print(position)
h, w = sobel_image.shape[:2] # w = 640,h = 480
# print(position)
step_1 = sobel_image_y[position[0] - window_h:position[0] + 2 * window_h, position[1]:position[1] + window_w]
# cv2.imshow("step_1", step_1)
# cv2.waitKey(0)
X = position[1]
Y = position[0] - window_h
if position[0] - window_h < 0:
step_1 = sobel_image_y[0:position[0] + 2 * window_h, position[1]:position[1] + window_w]
X = position[1]
Y = 0
elif position[0] + 2 * window_h > h:
step_1 = sobel_image_y[position[0] - window_h:h, position[1]:position[1] + window_w]
X = position[1]
Y = position[0] - window_h
            # get the horizontal projection array
            hor_list = self.get_horizontal_projection(step_1)
            # use the horizontal projection to re-locate the plate's top and bottom boundaries
upper, lower = self.detail_position_the_upper_and_lower_boundaries(X,Y,hor_list,sobel_image_y,position)
if upper != lower:
step_1 = step_1[lower:upper, 0:w]
# cv2.imshow("step_1", step_1)
# cv2.waitKey(0)
flag = True
else:
flag = False
            # ########## top/bottom boundary positioning ends here ##########
            # ########## left/right boundary positioning starts below ##########
            if flag is True:  # if step 1 (top/bottom boundaries) ran normally
image_step_1 = step_1
h_1, w_1 = image_step_1.shape[:2]
image_left = position[1] - math.floor(w_1 / 2)
image_right = position[1] + window_w + math.floor(w_1 / 2)
if image_left < 0:
flag = False
elif image_right > 640:
flag = False
if flag is True:
image_step_2 = sobel_image[position[0] - window_h + lower: position[0] - window_h + upper,
image_left:image_right]
ver_list = self.get_vertical_projection(image_step_2)
sum = 0
for i in range(len(ver_list)):
sum += ver_list[i]
avg = math.floor(sum / len(ver_list))
                # after taking the mean, subtract avg from every ver_list value, then use the maximum subarray sum to find the left/right boundaries (in grayscale, white is 255 and black is 0)
for i in range(len(ver_list)):
ver_list[i] = ver_list[i] - avg
left, right = self.max_sequence(ver_list)
if left == 0 and right == 0:
flag = False
if flag is True:
h, w = image_step_2.shape[:2]
image_step_2 = image_step_2[0:h, left:right]
# cv2.imshow("image_step_2", image_step_2)
# cv2.waitKey(0)
x1 = position[0] - window_h + lower
x2 = position[0] - window_h + upper
y1 = image_left + left
y2 = image_left + right
# print("x2 - x1 = " + str(x2 - x1))
# print("y2 - y1 = " + str(y2 - y1))
                    # filter out regions too close to the image border
if x1 < 100 or y1 < 100 or x2 > 480 or y2 > 640:
flag = False
                    # filter by plate aspect ratio
ratio = (y2 - y1) / (x2 - x1)
# print(ratio)
if ratio > 12 or ratio < 5:
flag = False
if flag is True:
area_gray_jump = int(integral_array[x2][y2]) + int(
integral_array[x1][y1]) - int(integral_array[x2][y1]) - int(
integral_array[x1][y2])
                        if area_gray_jump > math.floor(window_w * window_h / 8):  # the jump count must exceed one eighth of the search-window area
if len(candiate_list_2) != 0:
repeated_index = len(candiate_list_2) - 1
if not (abs(x1 - candiate_list_2[repeated_index][0]) <= 5 or abs(x2 - candiate_list_2[repeated_index][0]) <= 5 or abs(y1 - candiate_list_2[repeated_index][2]) <= 5 or abs(y2 - candiate_list_2[repeated_index][3]) <= 5):
candiate_list_2.append((x1, x2, y1, y2))
else:
candiate_list_2.append((x1, x2, y1, y2))
image_need = sobel_image[x1:x2, y1:y2]
# cv2.imshow("image_need", image_need)
# cv2.waitKey(0)
        # merge candidate positions whose coordinates are close together
# print(candiate_list_2)
# print(Counter(candiate_list_2).most_common(3))
result_locate = Counter(candiate_list_2).most_common(5)
        max_ratio = 0  # not named 'max', which would shadow the builtin
max_index = 0
max_width = 0
for i in range(len(result_locate)):
x1 = result_locate[i][0][0]
x2 = result_locate[i][0][1]
y1 = result_locate[i][0][2]
y2 = result_locate[i][0][3]
w = y2 - y1
image = sobel_image[x1:x2,y1:y2]
# cv2.imshow("image", image)
# cv2.waitKey(0)
ratio = math.floor((y2 - y1) / (x2 - x1))
print(ratio)
if ratio >= max_ratio:
max_ratio = ratio
max_index = i
max_width = x2 - x1
print(max_ratio)
        # print(result_locate[0][1])  # occurrence count of the most common tuple, e.g. [((238, 255, 202, 362), 36)]
result_x1 = result_locate[max_index][0][0]
result_x2 = result_locate[max_index][0][1]
result_y1 = result_locate[max_index][0][2]
result_y2 = result_locate[max_index][0][3]
position = (result_x1, result_x2, result_y1, result_y2)
result = sobel_image[result_x1:result_x2, result_y1:result_y2]
self.showLine(position[0],position[1],sobel_image_y,1,"stop")
return result, position
def rough_search_by_window(self, h, w, integral_array, ori_image,binary_image):
max_gray_jump = 0
candiate_list = []
max_locate = (0, 0)
for x in range(0, h - window_h, 5):
for y in range(0, w - window_w, 5):
# (x4,y4) + (x1,y1) - (x2,y2) - (x3,y3)
area_jump_level = 0
if ori_image[x + math.floor(window_h / 2)][y + math.floor(window_w / 2)] > 127:
area_jump_level = int(integral_array[x + window_h][y + window_w]) + int(
integral_array[x][y]) - int(integral_array[x + window_h][y]) - int(
integral_array[x][y + window_w])
#print(int(integral_array[x + window_h][y + window_w]) ,"+", int(
# integral_array[x][y]) ,"-", int(integral_array[x + window_h][y]) ,"-", int(
# integral_array[x][y + window_w]))
#print("x:",x,"y:",y,"跳变点个数:", area_jump_level,"区域总数的1/8:",(window_h * window_w) / 8)
if area_jump_level > (window_h * window_w) / 8.5 and 100 < x < 480 - 100 and 100 < y < 640 - 100:
candiate_list.append((x, y))
#self.showLine(x, y, binary_image, 1,"stop")
# print(area_jump_level)
#self.showLine(x,y,binary_image,0,"stop")
# print(candiate_list_1)
candiate_list.reverse()
return candiate_list
def get_gray_image_by_weight_avg(self, ori_image):
h, w = ori_image.shape[:2]
gray_img = np.zeros((h, w), dtype=np.uint8)
for i in range(h):
for j in range(w):
# Y = 0.3R + 0.59G + 0.11B
                # images opened via cv2 use BGR pixel order
gray_img[i, j] = 0.3 * ori_image[i, j][2] + 0.11 * ori_image[i, j][0] + 0.59 * ori_image[i, j][1]
return gray_img
def get_pixel_avg(self, image):
sum = 0
h, w = image.shape[:2]
for x in range(h):
for y in range(w):
sum += image[x][y]
avg = sum / (h * w)
return avg
    def get_character_density(self, image):  # compute the character density of the image
sum = 0
h, w = image.shape[:2]
for x in range(h):
for y in range(w):
sum += image[x][y]
avg = sum / (h * w)
return sum
    # build the gray-jump map of the image
def get_2d_gray_jump(self, image):
jump_list_2d = np.zeros((480, 640), np.uint8)
h, w = image.shape[:2]
for x in range(h):
for y in range(w):
                if abs(int(image[x][y]) - image[x][y - 1]) > 230:  # lowering this threshold lets darker plate images be located
jump_list_2d[x][y] = 1
else:
jump_list_2d[x][y] = 0
# np.set_printoptions(threshold=1e6)
# print(jump_list_2d)
return jump_list_2d
    # build the integral image of the gray jumps
def get_gray_scale_jump_integral(self, h, w, jump_array):
        h, w = jump_array.shape[:2]  # rows and columns (and channels) of the image
        sum = np.zeros((h + 1, w + 1), dtype=np.float32)  # zero-filled array of the required size
        image_integral = cv2.integral(jump_array, sum, cv2.CV_32SC1)  # compute the integral image; the output is sum
        gray_jump_integral = np.zeros((h + 1, w + 1), dtype=np.uint16)
        cv2.normalize(image_integral, gray_jump_integral, 0, 65535, cv2.NORM_MINMAX, cv2.CV_16UC1)  # normalize to the uint16 range
np.set_printoptions(threshold=1e6)
return gray_jump_integral
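    # The integral image lets the jump count of any rectangle be read in O(1):
    #   count(x1..x2, y1..y2) = I[x2][y2] + I[x1][y1] - I[x2][y1] - I[x1][y2]
    # which is exactly the lookup used in rough_search_by_window above.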
def get_horizontal_projection(self, shadow_image):
h, w = shadow_image.shape[:2]
hor_list = []
for x in range(h):
pixel_sum = 0
for y in range(w):
pixel_sum += shadow_image[x][y]
hor_list.append(pixel_sum)
return hor_list
def get_vertical_projection(self, shadow_image):
h, w = shadow_image.shape[:2]
ver_list = []
for x in range(w):
pixel_sum = 0
for y in range(h):
pixel_sum += shadow_image[y][x]
ver_list.append(pixel_sum)
return ver_list
def detail_position_the_upper_and_lower_boundaries(self, X,Y,list,sobel_image_y,position):
sum = 0
upper = 0
lower = 0
for i in range(len(list)):
sum += list[i]
        avg = 9000  # fixed threshold (instead of math.floor(sum / len(list)))
#print("[detail_position_the_upper_and_lower_boundaries] avg:",avg)
        # walk upward from the middle row; the first row whose projection exceeds the threshold is the refined top boundary, then break
flag = False
for i in range(math.floor(len(list) / 2), 0, -1):
self.showScanLine(position[0], position[1], Y + i, X, sobel_image_y, "row", 0)
if list[i] > avg:
self.showScanLine(position[0], position[1], Y + i, X, sobel_image_y, "row",1)
upper = i
flag = True
if flag is False:
continue
break
        # walk downward from the middle row; the first row whose projection exceeds the threshold is the refined bottom boundary, then break
flag = False
for i in range(math.floor(len(list) / 2), len(list)):
self.showScanLine(position[0], position[1], Y + i, X, sobel_image_y, "row", 0)
if list[i] > avg:
self.showScanLine(position[0], position[1], Y + i, X, sobel_image_y, "row",1)
lower = i
flag = True
if flag is False:
continue
break
return lower, upper
    def max_sequence(self, array):
        # Kadane's algorithm: find the contiguous run with the largest sum
        # (the original added array[j] twice per step, double-counting it)
        cur = 0
        best = 0
        bestI = 0
        bestJ = 0
        i = 0
        for j in range(len(array)):
            if cur > 0:
                cur += array[j]
            else:
                cur = array[j]
                i = j
            if cur > best:
                best = cur
                bestI = i
                bestJ = j
        l = bestI
        r = bestJ
        return l, r
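    # Example: max_sequence([1, -3, 4, 2, -1, 3]) returns (2, 5), i.e. the best
    # run is array[2:6] = [4, 2, -1, 3] with sum 8 (the bounds are inclusive).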
    def license_locate(self, filename):  # locate the plate via the vertical jump map
        sobel_image, sobel_image_y, diff, binary = self.preprocess_image(filename)
        # self.showPicture(sobel_image)
        # self.showPicture(diff)
        # self.showPicture(binary)
        integral_array, candiate_list = self.rough_locate(sobel_image, binary, diff)
        result, position = self.detail_locate_and_confirm(candiate_list, sobel_image, sobel_image_y, integral_array)
        return result
def showPicture(self,pic):
cv2.imshow("image", pic)
cv2.waitKey(0)
def showScanLine(self,posY,posX,Y,X,pic,type,zeroIsDeepCopy):
        color = 255  # draw the scan line in white
if (zeroIsDeepCopy == 0):
image = copy.deepcopy(pic)
else:
image = pic
# for topLineElement in range(posX,posX + 180):
# image[posY][topLineElement]=color
# image[posY + 20][topLineElement]=color
# for leftColumnElement in range(posY, posY + 20):
# image[leftColumnElement][posX]=color
# image[leftColumnElement][posX + 180]=color
if(type == "row"):
for topLineElement in range(0,640):
image[Y][topLineElement]=color
else:
for leftColumnElement in range(0, 480):
image[leftColumnElement][X]=color
cv2.imshow("image", image)
cv2.waitKey(10)
def showLine(self,Y,X,pic,zeroIsDeepCopy,stopFlag):
# image = cv2.imread(filename)
# image = cv2.resize(image, (640, 480))
        color = 255  # draw the box in white
if(zeroIsDeepCopy==0):
image = copy.deepcopy(pic)
else:
image = pic
for topLineElement in range(X,X + 180):
image[Y][topLineElement]=color
image[Y + 20][topLineElement]=color
for leftColumnElement in range(Y, Y + 20):
image[leftColumnElement][X]=color
image[leftColumnElement][X + 180]=color
cv2.imshow("image", image)
if(stopFlag=="stop"):
cv2.waitKey(0)
else:
cv2.waitKey(10)
if __name__ == '__main__':
begin = datetime.datetime.now()
instance = Locate()
# for i in range(9, 58):
filename = 'picture_for_train/11.jpg'
#filename = filename + str(i) + ".jpg"
print(filename)
    # ############### row-scan method #################
# image = cv2.imread(filename)
# gray_img = instance.get_gray_image_by_weight_avg(image)
# out = instance.grey_scale(gray_img)
# cv2.imshow("out", out)
# result = instance.mark_row_area(out)
#
# cv2.imshow("result", result)
# cv2.waitKey(0)
##################################################
result = instance.license_locate(filename)
#instance.showPicture(result)
# # cv2.imshow(filename[-8:], result)
# # cv2.waitKey(0)
# h, w = result.shape[:2]
#
# result = cv2.resize(result, (200, 40))
# cv2.imwrite('D:/Car_Identify/picture_locate/' + str(i) + ".jpg", result)
end = datetime.datetime.now()
print(str((end - begin).seconds) + "s")
| UTF-8 | Python | false | false | 20,569 | py | 2 | license_locate.py | 2 | 0.51484 | 0.483051 | 0 | 475 | 37.941053 | 246 |
cd-glacier/sketch_detection | 8,770,323,258,938 | cf0870cc205f7af7cc1b3bd569b934825c30ce1b | 8b78618774baf7226eaa969784c0ffd9d551ba1c | /canny.py | 807766af3de2e4bf320d35c6c0d7babe331599e1 | []
| no_license | https://github.com/cd-glacier/sketch_detection | 94207d4142274da44c8f4931535e5aeabd8d4380 | 913a3a1df95f55182d294fa3defcaf417f5a565e | refs/heads/master | 2021-05-31T19:34:48.157850 | 2016-02-06T16:36:49 | 2016-02-06T16:36:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import os
def getFileName():
    # list the input images, skipping hidden entries such as .DS_Store
    files = [f for f in os.listdir('./images') if not f.startswith('.')]
    return files
###########################################################################
files_name = getFileName()
for file_name in files_name:
name = file_name
img = cv2.imread('./images/' + name)
g_img = cv2.bilateralFilter(img, 9, 75, 75)
c_img = cv2.Canny(g_img, 50, 150)
cv2.imwrite('./canny/' + name, c_img)
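# Optional guard (a sketch, not part of the original flow): cv2.imwrite fails
# silently when the output directory is missing, so it can be created first:
#   os.makedirs('./canny', exist_ok=True)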
| UTF-8 | Python | false | false | 478 | py | 5 | canny.py | 3 | 0.566946 | 0.531381 | 0 | 24 | 18.875 | 75 |
fege/Thesis-Project | 816,043,816,783 | 4d44821575ea5beb048f06b374567ac7701628e9 | 8980f35343d346d39c01f28d5161bf0687765add | /restfs/manager/ResourceManager.py | d86987a62662cb6a713e44eb9195221df027479b | []
| no_license | https://github.com/fege/Thesis-Project | 51f47961ef54d9b8dc91b13bd9ff4feda5f447cf | 4d1d0e1ccb603771800e677de53fbcdb4f37b511 | refs/heads/master | 2021-01-21T21:48:40.178614 | 2012-09-26T21:04:28 | 2012-09-26T21:04:28 | 5,972,334 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from tornado.options import options
from restfs.objects.BucketOwner import BucketOwner
def rpcmethod(func):
""" Decorator to expose Node methods as remote procedure calls
Apply this decorator to methods in the Node class (or a subclass) in order
to make them remotely callable via the DHT's RPC mechanism.
"""
func.rpcmethod = True
return func
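# A dispatcher can later discover the exposed methods by checking the flag set
# above. An illustrative sketch, not part of this module:
#
#     def is_rpcmethod(obj, name):
#         return getattr(getattr(obj, name, None), 'rpcmethod', False)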
class ResourceManager(object):
_LOGGER = logging.getLogger('RESOURCE_MANAGER')
    def __init__(self):
        #FIXME SERVICE ..
        # dynamically load the driver class named in the configuration,
        # i.e. restfs.resource.<resource_driver>.ResourceDriver
        resource_plugin = "restfs.resource.%s.ResourceDriver" % options.resource_driver
        resource_mod = __import__(resource_plugin, globals(), locals(), ['ResourceDriver'])
        Resource = getattr(resource_mod, 'ResourceDriver')
        self.resource = Resource()
##########################################################################
# Bucket
##########################################################################
def findBucket(self,bucket_name):
self._LOGGER.debug("FIND Bucket %s" % bucket_name)
return self.resource.findBucket(bucket_name)
@rpcmethod
def findCluster(self,bucket_name):
return self.resource.findCluster(bucket_name)
def getBucketListByOwner(self,idUser):
self._LOGGER.debug("Get Bucket List by user %s " % idUser)
return self.resource.getBucketListByOwner(idUser)
def getCountBucketByOwner(self,idUser):
self._LOGGER.info("GET Count Bucket By Owner")
return self.resource.getCountBucketByOwner(idUser)
def getRegionList(self):
self._LOGGER.info("GET Region List")
return self.resource.getRegionList()
def addBucket(self, bucket_name, idUser):
self._LOGGER.info("Add Bucket")
bucket = BucketOwner(idUser,bucket_name)
self.resource.addBucket(bucket)
def removeBucket(self, bucket_name, context):
self._LOGGER.info("Remove Bucket")
self.resource.removeBucket(bucket_name)
def setBucketStatus(self, bucket_name, status, context):
self._LOGGER.info("SET Bucket Status")
self.resource.setBucketStatus(bucket_name, status)
| UTF-8 | Python | false | false | 2,335 | py | 86 | ResourceManager.py | 84 | 0.59015 | 0.59015 | 0 | 64 | 35.03125 | 91 |
staticdev/toml-validator | 5,549,097,772,298 | 4db10dc154a74f2b590a2d1fde59cc27d8b44b42 | 5eb6fc7cc27926a772a5036819b8bab5bb2989c5 | /src/toml_validator/use_cases/toml.py | bc82faaf1865a97ea7481974ab6dadf464103ff5 | [
"MIT"
]
| permissive | https://github.com/staticdev/toml-validator | 80076281fdae532a7f930570225ff4624ccd0aa1 | e3bc7a674c5ec1c996d7539616411784995869b1 | refs/heads/master | 2021-08-08T17:41:39.629182 | 2021-02-16T18:55:03 | 2021-02-16T18:55:03 | 243,814,155 | 4 | 1 | MIT | false | 2021-02-16T18:48:54 | 2020-02-28T17:07:00 | 2021-02-16T18:04:03 | 2021-02-16T18:48:53 | 689 | 1 | 1 | 0 | Python | false | false | """TOML Validator validations."""
import tomlkit
from tomlkit.exceptions import ParseError
from tomlkit.exceptions import TOMLKitError
def validate_extension(filename: str) -> bool:
"""Validates extension in filename.
Args:
filename (str): name of the file.
Returns:
bool: if extension is valid.
"""
valid_extensions = [".toml"]
for extension in valid_extensions:
if filename.endswith(extension):
return True
return False
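# Example (illustrative):
#     validate_extension("pyproject.toml")  # -> True
#     validate_extension("config.ini")      # -> False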
def execute(filename: str) -> str:
"""It validates the TOML.
Args:
filename (str): name of the file.
Returns:
str: error messages.
"""
with open(filename) as toml:
lines = toml.read()
try:
tomlkit.parse(lines)
return ""
except (TOMLKitError, ParseError) as errors:
return str(errors)
| UTF-8 | Python | false | false | 855 | py | 12 | toml.py | 8 | 0.624561 | 0.624561 | 0 | 39 | 20.923077 | 48 |
hbahamonde/Smote_US | 1,460,288,898,188 | 8f1f663fa5812e65f7dd38de84f435425a17fd7b | cccabaf75f4f52a19d5ffe09843923b334652bc9 | /Datos/smote_svm.py | dac6cd24d2b08296ba74e53d3cc152bfd49f25fe | [
"Apache-2.0"
]
| permissive | https://github.com/hbahamonde/Smote_US | 195c826bd5f6883d1c048c5c4f9df142f5660e12 | 7bbb2bd5182e96c034f7b3c8b6e13d7584f8d6d3 | refs/heads/main | 2023-04-22T08:58:41.699675 | 2021-04-27T20:17:39 | 2021-04-27T20:17:39 | 353,096,291 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # svm.py
import numpy as np # for handling multi-dimensional array operation
import pandas as pd # for reading data from csv
from sklearn import svm # for classification problem
from sklearn.pipeline import make_pipeline # create pipeline
from sklearn.preprocessing import StandardScaler # scaling data
from collections import Counter
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from matplotlib import pyplot
from numpy import where
from numpy import mean
#from sklearn.model_selection import cross_val_score
#from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
import random
from statistics import pstdev
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
data = pd.read_csv('./dataMergedConjoint.csv')
data = data.drop_duplicates(subset=['idnum'])
X = data[['woman', 'socideo', 'partyid', 'reg', 'trustfed', 'income.n','educ.n', 'polknow']]
y = data['vote.selling']
#concatenate?
# Min-max scale the features to the [0, 1] range
xvalue = X.values
min_max_scaler = preprocessing.MinMaxScaler()
xscaled = min_max_scaler.fit_transform(xvalue)
X = pd.DataFrame(xscaled)
#training_pairs = pd.concat([X,y],axis=1)
nobs = len(X)
nfold = 10
lsets = int(np.ceil(nobs/nfold))
nrepeat = 3
RepeatSVMLinear = np.zeros([nrepeat,nfold])
PPVSVMLinear = np.zeros([nrepeat,nfold])
for i in range(nrepeat):
indexes = list(range(nobs))
random.shuffle(indexes)
dfs = np.array_split(indexes,nfold)
for j in range(nfold):
index_bad = X.index.isin(dfs[j])
X_test = X[index_bad]
y_test = y[index_bad]
X_train = X[~index_bad]
y_train = y[~index_bad]
        # SMOTE: oversample only the training folds; the held-out fold stays untouched
        oversample = SMOTE(k_neighbors=7)
        X_train, y_train = oversample.fit_resample(X_train, y_train)
        svc = svm.SVC(kernel='rbf', random_state=0, tol=1e-5, C=1)
        svc.fit(X_train, y_train)
        y_predict = svc.predict(X_test)
        tn, fp, fn, tp = confusion_matrix(y_test.ravel(), y_predict).ravel()
        RepeatSVMLinear[i, j] = tp / (tp + fn)  # sensitivity (recall)
        PPVSVMLinear[i, j] = tp / (tp + fp)  # positive predictive value (precision)

print('SVC (RBF kernel)')
print(mean(100 * RepeatSVMLinear.ravel()))
print(pstdev(100 * RepeatSVMLinear.ravel()))
print(mean(100 * PPVSVMLinear.ravel()))
print(pstdev(100 * PPVSVMLinear.ravel()))
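# The hand-rolled loops above implement a repeated (non-stratified) k-fold;
# sklearn expresses the same split scheme directly, e.g. (a sketch):
#   from sklearn.model_selection import RepeatedKFold
#   cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
# Note that SMOTE must still be fit on the training folds only, as done above.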
##
### summarize class distribution
##counter = Counter(y)
##print(counter)
##
### Classification
##over = SMOTE(k_neighbors=7)
##
### Decision Tree Classifier
##steps = [('over', over), ('model', DecisionTreeClassifier())]
##pipeline = Pipeline(steps=steps)
##cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
##scores = cross_val_score(pipeline, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
##print('Mean ROC AUC: %.3f' % mean(scores))
##
### Decision Tree Classifier
##steps = [('over', over), ('model', LinearSVC())]
##pipeline = Pipeline(steps=steps)
##cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
##scores = cross_val_score(pipeline, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
##print('Mean ROC AUC: %.3f' % mean(scores))
##
##
##
### SVM only accepts numerical values.
### Therefore, we will transform the categories into
### values 1 and 0.
##
### at.run
##cancannot_map = {'Citizens CANNOT run for office for the next two elections':0, 'Citizens CAN run for office for the next two elections':1}
##data['at.run'] = data['at.run'].map(cancannot_map)
### at.asso
##cancannot_map = {'Citizens CANNOT associate with others and form groups':0, 'Citizens CAN associate with others and form groups':1}
##data['at.asso'] = data['at.asso'].map(cancannot_map)
### at.press
##cancannot_map = {'Media CANNOT confront the Government':0, 'Media CAN confront the Government':1}
##data['at.press'] = data['at.press'].map(cancannot_map)
### at.presaut
##cancannot_map = {'President CANNOT rule without Congress':1, 'President CAN rule without Congress':0}
##data['at.presaut'] = data['at.presaut'].map(cancannot_map)
### at.vote
##cancannot_map = {'Citizens CANNOT vote in the next two elections':0, 'Citizens CAN vote in the next two elections':1}
##data['at.vote'] = data['at.vote'].map(cancannot_map)
##
### drop last column (extra column added by pd)
### and unnecessary first column (id)
### data.drop(data.columns[[-1 0]], axis=1, inplace=True)
### put features & outputs in different DataFrames for convenience
##Y = data.loc[:, 'selected'] # all rows of 'diagnosis'
##X_c1 = data.iloc[range(0,11080,2),[5,6,7,8,9]] # all feature rows candidate 1
##X_c2 = data.iloc[range(1,11080,2),[5,6,7,8,9]] # all feature rows candidate 1
##X = X_c1.values-X_c2.values
##X = pd.DataFrame(X)
##Y_c1 = Y.iloc[range(0,11080,2)] # all feature rows candidate 1
##Y_c2 = Y.iloc[range(1,11080,2)] # all feature rows candidate 1
##Y = Y_c1.values-Y_c2.values
##Y = pd.DataFrame(Y)
##W = pd.DataFrame(data=None,columns=['k','w.at.run','w.at.asso','w.at.press','w.at.presaut','w.at.vote','selected','at.run','at.asso','at.press','at.presaut','at.vote'])
##
##print("training started...")
##for i in list(range(int(len(Y)/5))):
## print(i)
## X_train = X.iloc[5*i:5*(i+1),:]
## #X_train = [X_train.iloc[0,:],X_train.iloc[1,:],X_train.iloc[2,:],X_train.iloc[3,:],X_train.iloc[4,:]]
## y_train = Y.iloc[5*i:5*(i+1)]
## if (-1 in np.array(y_train)) and (1 in np.array(y_train)):
## #clf = make_pipeline(StandardScaler(),LinearSVC(random_state=0, tol=1e-5, fit_intercept=False))
## clf = LinearSVC(random_state=0, tol=1e-5, fit_intercept=False, C = 10, max_iter = 2000)
## clf.fit(X_train, y_train.values.ravel())
## #print(clf.decision_function(np.eye(5)))
## w=list(clf.decision_function(np.eye(5)))
##
## w = [i+1]+w
## w = pd.DataFrame({'k':[w[0],w[0],w[0],w[0],w[0]],
## 'w.at.run':[w[1],w[1],w[1],w[1],w[1]],
## 'w.at.asso':[w[2],w[2],w[2],w[2],w[2]],
## 'w.at.press':[w[3],w[3],w[3],w[3],w[3]],
## 'w.at.presaut':[w[4],w[4],w[4],w[4],w[4]],
## 'w.at.vote':[w[5],w[5],w[5],w[5],w[5]]})
## #aux=pd.DataFrame(np.ones((5,1))*w)
## w['selected']=y_train.values
## w['at.run']=X_train[0].values
## w['at.asso']=X_train[1].values
## w['at.press']=X_train[2].values
## w['at.presaut']=X_train[3].values
## w['at.vote']=X_train[4].values
## W = pd.concat([W,w])
##
##pd.DataFrame(W).to_excel(r'./File Name.xlsx', index = False)
##
##
| UTF-8 | Python | false | false | 6,605 | py | 84 | smote_svm.py | 4 | 0.64436 | 0.619379 | 0 | 165 | 39.030303 | 170 |
mty0313/get_my_code_back | 11,596,411,702,741 | 1b31a67f616b39b69ae1d6b67c22af83536bc423 | c5e36b1ff4427b5e182bb3309916446aa1fe89e0 | /get_accpeted_submission.py | 749a43c4a14515eff8501299459076b529cd0e31 | []
| no_license | https://github.com/mty0313/get_my_code_back | 8fdcc3e714c09cf8ea9bd3dc08aff4d1803ece33 | 2c87b7e05857f73354fca4a5b6e7786aefc82086 | refs/heads/master | 2022-03-11T09:29:52.438050 | 2019-11-22T06:44:46 | 2019-11-22T06:44:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
url = "https://acm.nuist.edu.cn/v1/course/37/user/own-submission"
headers = {
'authority': "acm.nuist.edu.cn",
'method': "GET",
'path': "/v1/course/37/user/own-submission?page=1&perpage=12",
'scheme': "https",
'accept': "application/json, text/plain, */*",
'accept-encoding': "gzip, deflate, br",
'accept-language': "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
'cookie': "lang=3; ; ssx-session-id=s%3ASvDHcXAY_5xiSP2qhG2aNy0RKyBX2rgJ.mfbNzqAR80E70jhWGYddwh9YpX28kB6D7Hkx%2Bo3tago; XSRF-TOKEN=oLxdjork-r2aHuV8eF-VqAJLJhX2Lp9Kkicw",
'if-none-match': '''W/"5dd269a0-b16"''',
'referer': "https://acm.nuist.edu.cn/course/37/submission",
'sec-fetch-mode': "cors",
'sec-fetch-site': "same-origin",
'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36",
'x-xsrf-token': "FkfSMn3e-WHHiNzAfcal9pXreBEVOvdkWN9M",
'Cache-Control': "no-cache",
'Postman-Token': "b02b558b-8604-45bb-85ad-4e754324a5ba,a09f5332-6137-4287-89f5-a9e1f5792170",
'Host': "acm.nuist.edu.cn",
'Connection': "keep-alive",
'cache-control': "no-cache"
}
if __name__ == "__main__":
result = []
for current_page in range(1, 5):
querystring = {"page": current_page, "perpage": "12"}
response = requests.request("GET", url, headers=headers, params=querystring)
result_list = response.json()[0]
        for item in result_list:
            if item['status'] == 0:  # status 0 marks an accepted submission
result_dict = {}
result_dict['id'] = item['id']
result_dict['problemId'] = item['problemId']
result_dict['title'] = item['title'].strip(' ')
result.append(result_dict)
with open('submission_info.txt', 'a+') as file:
for item in result:
print(item)
file.write(str(item) + '\n')
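# A defensive variant of the fetch (a sketch using the same names as above):
#   response = requests.get(url, headers=headers, params=querystring, timeout=10)
#   response.raise_for_status()
#   result_list = response.json()[0]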
| UTF-8 | Python | false | false | 1,922 | py | 5 | get_accpeted_submission.py | 2 | 0.605099 | 0.538502 | 0 | 45 | 41.644444 | 173 |
Daniel-02/Exercicio3CoAP | 18,537,078,889,573 | 893e27dc2a3f6ec7e3cf739f977f186f7a543fcf | 7829830583b1bed7a2bd974774dd507dd0cd84cc | /aplicacao.py | 53a05f57088e7d34ce6965e26aebe69234779934 | []
| no_license | https://github.com/Daniel-02/Exercicio3CoAP | 66704b122806520b21e7a24160fa5c4fd5f612d4 | b1440cbf9118b4d24c465c79965957897c8070b1 | refs/heads/master | 2022-09-26T07:51:25.864922 | 2020-06-03T14:03:26 | 2020-06-03T14:03:26 | 268,911,012 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import getopt
import sys
from coapthon.client.helperclient import HelperClient
from sense_emu import SenseHat
# Author: Daniel Arena Toledo
client = None
sense = SenseHat()  # instantiate the SenseHat emulator
red = (255, 0, 0)  # RGB for red
black = (0, 0, 0)  # RGB for black (LEDs off)
limiar_temp = 0  # stores the received temperature threshold
limiar_pres = 0  # stores the received pressure threshold
# Callback executed whenever the observe relationship reports a resource change
def callback(response):
    global client
    global limiar_temp
    global limiar_pres
    if response.payload:
        limites = response.payload.split()
        limiar_temp = float(limites[0])  # store the new temperature threshold received from the server
        limiar_pres = float(limites[1])  # store the new pressure threshold received from the server
        print("Current threshold averages on the server")
        print("Temperature: {} C".format(limiar_temp) + " ---- Pressure: {} mbar".format(limiar_pres))
    else:
        print("No values stored")
# Parse the address argument into host and port
address = sys.argv[1]
host, port = address.split(':')
port = int(port)

client = HelperClient(server=(host, port))  # instantiate a CoAP client
resposta = client.get('sensor')  # GET the resource from the server
if resposta.payload:
    limites = resposta.payload.split()
    limiar_temp = float(limites[0])  # store the temperature threshold received from the server
    limiar_pres = float(limites[1])  # store the pressure threshold received from the server

client.observe('sensor', callback)  # observe the resource; callback runs on every change
while True:
    temp = sense.temperature  # current temperature reading from the sensor
    pressure = sense.pressure  # current pressure reading from the sensor
    # check whether both temperature and pressure exceed their thresholds
    acende_leds = temp > limiar_temp and pressure > limiar_pres
    # above both thresholds: light the LEDs red; otherwise black (off)
    pixels = [red if acende_leds else black for i in range(64)]
    sense.set_pixels(pixels)
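# Note: client.observe registers a CoAP Observe relationship (RFC 7641); the
# callback fires on every server notification until the observation is
# cancelled or the client is shut down (e.g. client.stop(), not shown here).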
| UTF-8 | Python | false | false | 2,258 | py | 4 | aplicacao.py | 3 | 0.697963 | 0.690434 | 0 | 61 | 34.983607 | 102 |
vicety/LeetCode | 5,703,716,595,703 | a033e0aa0b224c89c0fdcce07094870306277a35 | 50efdd972f278f3fec22c64cd0ba038e2d035d58 | /python/interview/2022-intern/netease-2022/3.py | 2f5bbdc9d16b2181bdad8257c534b96c6fc5bf61 | []
| no_license | https://github.com/vicety/LeetCode | 470e53cb85bb421dd9ed61a672703358981aaf27 | d62b95145f0ea00873ed1fe45b2c31a4794cc7e5 | refs/heads/surface-aya | 2023-07-21T05:53:43.132791 | 2023-07-11T11:36:05 | 2023-07-11T11:36:05 | 250,862,810 | 0 | 0 | null | false | 2023-05-09T18:51:22 | 2020-03-28T18:11:33 | 2021-10-14T04:28:45 | 2023-05-09T18:51:19 | 1,221 | 0 | 0 | 4 | Java | false | false | n = int(input())
odd_layer_sum = 0
even_layer_sum = 0
now = 1
odd_layer = True
acc = 0
while True:
if acc + now > n:
if odd_layer:
odd_layer_sum += (n - acc)
else:
even_layer_sum += (n - acc)
break
acc += now
if odd_layer:
odd_layer_sum += now
else:
even_layer_sum += now
odd_layer = not odd_layer
now *= 2
ans = []
i = 0
now = 1
odd_now = 1
even_now = 2
if odd_layer_sum <= even_layer_sum:  # odd layers hold no more slots than even layers
    use_even = True  # the first layer consumes even numbers
else:
use_even = False
while i <= n:
for j in range(1, now + 1):
if i + j > n:
break
if use_even:
if even_now <= n:
ans.append(even_now)
even_now += 2
else:
ans.append(odd_now)
odd_now += 2
else:
if odd_now <= n:
ans.append(odd_now)
odd_now += 2
else:
ans.append(even_now)
even_now += 2
use_even = not use_even
i += now
now *= 2
# print(odd_layer_sum, even_layer_sum)
print(" ".join(list(map(lambda x: str(x), ans))))
| UTF-8 | Python | false | false | 1,214 | py | 670 | 3.py | 669 | 0.45339 | 0.439831 | 0 | 58 | 19.344828 | 49 |
jmborr/code | 8,924,942,083,173 | cd963c4ec2f8cba10e7f2e526ae7cc55bac38fc8 | 83ed1e2f176133c03a5f6dfa504b8df15ae71efb | /projects/nonhomol_pdbJul05/avTMcentr.py | 9c09fbdf8b73eb2f4c41d06a4b321659988bc0ea | []
| no_license | https://github.com/jmborr/code | 319db14f28e1dea27f9fc703be629f171e6bd95f | 32720b57699bf01803367566cdc5fff2b6bce810 | refs/heads/master | 2022-03-09T16:11:07.455402 | 2019-10-28T15:03:01 | 2019-10-28T15:03:01 | 23,627,627 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
"""
For each sequence, calculate the average TM-score between its centroids,
then average over all sequences.
"""
import sys,os
from utilities.small_utilities import chomp
from spicker.spickerResultsManager import spickOut
from math import sqrt
headers=chomp(open('/gpfs1/active/jose/projects/nonhomol_pdbJul05/small_proteins/successAplusB.list','r').readlines())
#headers=chomp(open('/gpfs1/active/jose/projects/nonhomol_pdbJul05/small_proteins/toylist','r').readlines())
root='/gpfs1/active/jose/projects/nonhomol_pdbJul05/small_proteins'
avv = 0
avv2 = 0
for header in headers:
    p = spickOut(dir=root + '/' + header[1] + '/' + header)
    av = 0.0
    for id1 in p.ids:
        for id2 in p.ids:
            av += p.TMidId(id1, id2)
    av = (av - p.nc) / (p.nc * (p.nc - 1))  # drop the nc self-pairs (TM=1) and average over ordered pairs
    avv += av
    avv2 += av * av
    print header, av
avv = avv / len(headers)
dev = sqrt(avv2 / len(headers) - avv * avv)  # population std via E[x^2] - E[x]^2
print 'average=', avv, 'deviation=', dev
| UTF-8 | Python | false | false | 911 | py | 693 | avTMcentr.py | 508 | 0.712404 | 0.688255 | 0 | 35 | 25.028571 | 118 |
stackpearson/cs-notes | 6,219,112,683,947 | 2c0cb57f4e438bcc318c5b50e3de9166775c3107 | 3f1db3c9641a93bece8c3952dd51788298b7f313 | /lambda_questions/treePaths.py | bf7a80cc2e7b69e4567ea337337c44fb7fe9e40d | []
| no_license | https://github.com/stackpearson/cs-notes | 8e312aaac13da16e826e46ae059622aba5b8ebb6 | 8193e6b3da65ac8865ab8f6343375a4d0d7a6c84 | refs/heads/master | 2023-02-16T01:09:12.337041 | 2021-01-07T02:58:12 | 2021-01-07T02:58:12 | 314,680,781 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def treePaths(t):
    res = []
    # base case: empty tree
    if t is None:
        return res
    # a pre-order walk fits best: emit the path once a leaf is reached
    if t.left is None and t.right is None:
        s = f'{t.value}'
        res.append(s)
        return res
    string = ''
    recurse(t, string, res)
    return res
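# Example (assumes a simple node type; none is defined in this file):
#   class Node:
#       def __init__(self, value, left=None, right=None):
#           self.value, self.left, self.right = value, left, right
#   treePaths(Node(1, Node(2, right=Node(5)), Node(3)))  # -> ['1->2->5', '1->3']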
# walk the tree recursively, accumulating the path string
def recurse(root, s, res):
    # leaf node: append the completed path
    if root.left is None and root.right is None:
        res.append(s + f'{root.value}')
    # descend the left subtree until we hit null
    s += f'{root.value}->'
    if root.left is not None:
        recurse(root.left, s, res)
    # descend the right subtree until we hit null
    if root.right is not None:
recurse(root.right, s, res) | UTF-8 | Python | false | false | 807 | py | 34 | treePaths.py | 34 | 0.542751 | 0.542751 | 0 | 33 | 23.484848 | 48 |
vhnuuh/pyutil | 10,488,310,142,954 | a7ad7e7f948d2c913e9596cbe7f08c2b37d610d4 | 6ea48188896b8ffd157032441f90d20c629d76c3 | /examples/thread_/signal_/custom_exit.py | 2b7e4ed0e613ff2705a6dd1f5fbeb0293ef6c700 | []
| no_license | https://github.com/vhnuuh/pyutil | 71925838f6d620861638d8244827b2b3fb7db936 | d34f44b4de6ab4eebd0fe7782ea6e20285b17f31 | refs/heads/master | 2019-07-14T15:54:36.643100 | 2019-03-19T09:33:28 | 2019-03-19T09:33:28 | 27,152,767 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding:utf-8
"""
inspired by the openstack service code
"""
# TODO: the exception path has not been verified yet
import signal
import os
import time
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
print 'raise SignalExit'
self.signo = signo
def parent_handler(signum, frame):
    # forward SIGTERM to the child, reap it, then exit ourselves
    os.kill(pid, signal.SIGTERM)
    try:
        result = os.waitpid(pid, 0)
        print 'wait %s status: %s' % result
    except Exception as ex:
        print ex
    exit(0)

def exit_handler(signum, frame):
    # restore the default handler and leave via a plain SystemExit
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    raise SystemExit(1)

def custom_exit_handler(signum, frame):
    # raise the custom signal-aware exit exception defined above
    #signal.signal(signal.SIGTERM, signal.SIG_DFL)
    raise SignalExit(signum)
print 'Parent pid: %s' % os.getpid()
print 'forking...'
pid = os.fork()
if pid:
signal.signal(signal.SIGTERM, parent_handler)
while True:
time.sleep(5)
        print 'Parent waiting for termination'
else:
#signal.signal(signal.SIGTERM, signal.SIG_DFL)
#signal.signal(signal.SIGTERM, exit_handler)
signal.signal(signal.SIGTERM, custom_exit_handler)
try:
while True:
print 'child running'
time.sleep(5)
except Exception as ex:
print ex
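# Note: SignalExit derives from SystemExit, a BaseException subclass, so the
# child's `except Exception` deliberately does not swallow it and the child
# terminates with the exit code raised by custom_exit_handler.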
| UTF-8 | Python | false | false | 1,248 | py | 392 | custom_exit.py | 360 | 0.642165 | 0.636511 | 0 | 55 | 21.509091 | 54 |
fossabot/Mi.py | 3,770,981,287,041 | ac281c1b8782da0261fedd1ce44e82f80d62a455 | e4765d71b0d27fc05ceaea19021ad569f4b4c48a | /misskey/note.py | 4d473f9adb80f7569e359156173771f31b3a98b6 | [
"MIT"
]
| permissive | https://github.com/fossabot/Mi.py | 683d78fb190892a538d5f5f39839655b27a2379f | ec58f1f8a44c1708f235837014e993e697fe6a54 | refs/heads/master | 2023-07-14T13:42:53.829408 | 2021-08-27T13:02:43 | 2021-08-27T13:02:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import re
from misskey.user import User
class Note(object):
__slots__ = (
'id',
'created_at',
'type',
'user_id',
'author',
'text',
'cw',
'visibility',
'visible_user_ids',
'renote_count',
'replies_count',
'reactions',
'emojis',
'file_ids',
'reply_id',
'renote_id',
'deleted_at',
'uri',
'ws'
)
    def __init__(self, data, ws=None, text=None):
        self.ws = ws
        # API payload keys are camelCase; map each one onto its snake_case slot
        mapping = {
            'id': 'id', 'createdAt': 'created_at', 'userId': 'user_id',
            'user': 'author', 'text': 'text', 'cw': 'cw',
            'visibility': 'visibility', 'renoteCount': 'renote_count',
            'repliesCount': 'replies_count', 'reactions': 'reactions',
            'emojis': 'emojis', 'fileIds': 'file_ids',
            'replyId': 'reply_id', 'renoteId': 'renote_id',
        }
        for key, attr in mapping.items():
            try:
                value = data[key]
            except KeyError:
                continue
            else:  # no error was raised, so store the value on the attribute
                if key == 'user':
                    setattr(self, attr, User(value))
                else:
                    setattr(self, attr, value)
def content(self, content):
content = {
'visibility': f"{content.get('visibility', self.visibility)}",
'text': f"{content.get('text', '')}",
'cw': content.get('cw'),
'viaMobile': f"{content.get('viaMobile', 'false')}",
'localOnly': f"{content.get('localOnly', 'false')}",
'noExtractMentions': f"{content.get('noExtractMentions', 'false')}",
'noExtractHashtags': f"{content.get('noExtractHashtags', 'false')}",
'noExtractEmojis': f"{content.get('noExtractEmojis', 'false')}",
'replyId': f"{content.get('replyId', self.id)}",
}
return content
    def create(self):
        # stub: note creation is not implemented yet; sends an empty frame
        self.ws.send('')
    def reply(self, content: dict = None):
        # avoid a mutable default argument; fall back to an empty dict
        content = self.content(content or {})
self.ws.send(json.dumps(
{
'type': 'api',
'body': {
'id': 'f8b2894d-1b5d-60f3-c9ea-60851f8e9730',
'endpoint': 'notes/create',
'data': content
}
}, ensure_ascii=False))
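# Minimal usage sketch (assumes an established websocket `ws` to a Misskey
# streaming endpoint and a note payload dict `data`; illustrative only):
#   note = Note(data, ws=ws)
#   note.reply({'text': 'hello'})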
| UTF-8 | Python | false | false | 2,336 | py | 16 | note.py | 12 | 0.449956 | 0.440818 | 0 | 75 | 29.64 | 80 |
Earmada/django_test_travis | 15,839,839,407,634 | 7adddba339d3bc2966833d8a300f1af66b3edea5 | 24d4802442c2384d3bdc7dab3eecf3f6aa5df680 | /django_test/apps/carts/api/v1/api_urls.py | de75b55a88025b13ffb300b84952d2b0fe5e43af | []
| no_license | https://github.com/Earmada/django_test_travis | 92ff885f12e7f2db09251ee3aa84f4ce30d017bf | a875a3398505c721b02bcc8cb89d102f54a49e16 | refs/heads/master | 2018-02-07T05:54:38.327666 | 2017-03-04T11:17:12 | 2017-03-04T11:17:12 | 83,034,539 | 1 | 0 | null | false | 2017-03-04T11:17:12 | 2017-02-24T11:25:00 | 2017-02-24T11:33:58 | 2017-03-04T11:17:12 | 1,704 | 0 | 0 | 0 | Python | null | null | # Python imports
# Core Django imports
from django.conf.urls import url
# Third-Party imports
# Apps Imports
from .api_controllers import CartsListAPIController, CartDetail
from django_test.apps.products_carts.api.v1.api_controllers import ProductsCartAPIController, AddProductToCart
from django_test.apps.products_carts.api.v1.api_controllers import UpdateProductInCart
urlpatterns = [
url(r'^(?P<pk>[^/]+)/products/$', ProductsCartAPIController.as_view(), name='products_cart'),
url(r'^(?P<pk>[^/]+)/products/(?P<product_id>[^/]+)/actions/add-to-cart/$', AddProductToCart.as_view(),
name='product_add_to_cart'),
url(r'^(?P<pk>[^/]+)/products/(?P<product_id>[^/]+)/actions/update-cart/$', UpdateProductInCart.as_view(),
name='update_product_in_cart'),
url(r'^(?P<pk>[^/]+)/$', CartDetail.as_view(), name='cart_detail'),
url(r'^$', CartsListAPIController.as_view(), name='carts_list'),
]
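# Reversing one of these routes looks like this (a sketch; the exact import
# path depends on the Django version and any URLconf namespacing):
#   from django.urls import reverse
#   reverse('cart_detail', kwargs={'pk': cart_id})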
| UTF-8 | Python | false | false | 927 | py | 158 | api_urls.py | 143 | 0.691478 | 0.68932 | 0 | 20 | 45.35 | 110 |