Dataset schema:

| column | dtype | values |
| --- | --- | --- |
| repo_name | string | lengths 7–111 |
| __id__ | int64 | 16.6k–19,705B |
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 5–151 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_url | string | lengths 26–130 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–42 |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 14.6k–687M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 12 classes |
| gha_fork | bool | 2 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_size | int64 | 0–10.2M, nullable (⌀) |
| gha_stargazers_count | int32 | 0–178k, nullable (⌀) |
| gha_forks_count | int32 | 0–88.9k, nullable (⌀) |
| gha_open_issues_count | int32 | 0–2.72k, nullable (⌀) |
| gha_language | string | lengths 1–16, nullable (⌀) |
| gha_archived | bool | 1 class |
| gha_disabled | bool | 1 class |
| content | string | lengths 10–2.95M |
| src_encoding | string | 5 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 10–2.95M |
| extension | string | 19 classes |
| num_repo_files | int64 | 1–202k |
| filename | string | lengths 4–112 |
| num_lang_files | int64 | 1–202k |
| alphanum_fraction | float64 | 0.26–0.89 |
| alpha_fraction | float64 | 0.2–0.89 |
| hex_fraction | float64 | 0–0.09 |
| num_lines | int32 | 1–93.6k |
| avg_line_length | float64 | 4.57–103 |
| max_line_length | int64 | 7–931 |
cash2one/xai | 9,156,870,323,949 | c979052f95892a654509fe328e07ca46ddad9e1b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_territories.py | 9e1e0edc9aacaed34bdd028bb27f1809b997f02c | [
"MIT"
]
| permissive | https://github.com/cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#class header
class _TERRITORIES():
    def __init__(self):
        self.name = "TERRITORIES"
        self.definitions = ['territory']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['territory']
| UTF-8 | Python | false | false | 236 | py | 37,275 | _territories.py | 37,266 | 0.622881 | 0.622881 | 0 | 13 | 17 | 30 |
dangq/glandore | 14,663,018,393,479 | 90ef5d51d831148f7ec3e718145614ae6b3f18c5 | 3b39969f832d283a2833064a3d7736cbb15bd44b | /owl.py | 3fc63b58447bb33a871ab793ffc0f91b69b4c049 | []
| no_license | https://github.com/dangq/glandore | 070a9cee252ed57da2a5ef5d7a118051417c9627 | aaf015c343118a260d38927907c719dbd7c49e64 | refs/heads/master | 2021-01-01T05:27:50.511297 | 2016-04-16T08:03:33 | 2016-04-16T08:03:33 | 56,372,475 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rdflib import ConjunctiveGraph, Namespace, exceptions
from rdflib import URIRef, RDFS, RDF, BNode
import OWL
class OntoInspector(object):
    """Class that includes methods for querying an RDFS/OWL ontology"""

    def __init__(self, uri, language=""):
        super(OntoInspector, self).__init__()
        self.rdfGraph = ConjunctiveGraph()
        try:
            self.rdfGraph.parse(uri, format="xml")
        except:
            try:
                self.rdfGraph.parse(uri, format="n3")
            except:
                raise exceptions.Error("Could not parse the file! Is it a valid RDF/OWL ontology?")
        finally:
            # let's cache some useful info for faster access
            self.baseURI = self.get_OntologyURI() or uri
            self.allclasses = self.__getAllClasses(classPredicate=None)
            self.toplayer = self.__getTopclasses()
            self.tree = self.__getTree()

    def get_OntologyURI(self, *args, **kwargs):
        # todo
        pass

    def __getAllClasses(self, classPredicate=None, *args, **kwargs):
        # todo
        pass

    def __getTopclasses(self, *args, **kwargs):
        pass

    def __getTree(self, *args, **kwargs):
        # todo
        pass
| UTF-8 | Python | false | false | 1,163 | py | 5 | owl.py | 3 | 0.567498 | 0.566638 | 0 | 46 | 24.304348 | 99 |
minhax/Autonomous-Farming-Robot-Station | 14,920,716,434,182 | 6c24593875306af8e5b195767206865c0a7090c6 | 817f5401a8f6a2ecf4fcc073c88fd83661afbdb1 | /src/Tests/launchRobot.py | 3128f6eb834736f896fcd2ee3e1068fb2f787152 | []
| no_license | https://github.com/minhax/Autonomous-Farming-Robot-Station | 741a49df30e571f161672b60415700cc2764a27e | e2b8ae9cc8e505c85bd03019a20ad21edb784892 | refs/heads/master | 2020-04-30T17:31:40.753651 | 2019-03-07T07:57:01 | 2019-03-07T07:57:01 | 176,982,434 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Messages exchange test
from src.Robot.IA_Robot.src.Scheduler.Scheduler import *
from src.Robot.Navigation_Robot.src.Navigation.NavigationController import *
from Common.Parser.parser import Parser
from src.Robot.IA_Robot.src.Weeding.WeedingController import *
from CommunicationManager.Robot.Robot_CommunicationManager import *
def start():
    print ("# ------ Robot Part1: sync_navigator ------ #")
    print("[Main] Loading XML File")
    # 1) Load XML File and create corresponding commands
    parser = Parser('../Common/Parser/files/robot.xml')
    robot = parser.obj
    print("[Main] Creating Navigating and weeding Controllers")
    # 2) a) sync_navigator command for the Navigation NavigationController
    navigation_controller = NavigationController(robot)
    weeding_controller = WeedingController(robot)
    print("[Main] Creating Scheduler")
    # 2) b) Scheduler organizes and invoke commands for the NavigationController and the Task1_Weeding Controller
    scheduler = Scheduler(navigation_controller, weeding_controller)
    print(" [Main] Launch RobotCommunicationManager")
    com_manager = Robot_CommunicationManager.getInstance(scheduler)
    print("[Main] Launch Socket server")
    # Launch a server s, always listening
    com_manager.new_listening_socket_server()

    '''# 3) Transfer connexion information to server
    print("[Main] Transferring information concerning robot to server ...")
    msg = ConfigMsg(const.CLIENT_DEFAULT_PORT, "192.168.14.2", robot.get_robot_id, robot) # Local port on which server can communicate
    # Robot_CommunicationManager.send_message_to_server(msg)
    CommunicationManager.send_message_to_localhost(msg, const.CLIENT_DEFAULT_PORT)
    print(" ... Done")
    print (" # ------ Robot Part2 : Task1_Weeding ------- #")
    print("[Main] Initialise commands created")
    # Task1_Weeding action for the navigation controller includes autonomous vehicle guidance on mapping
    # command_weeding_navigation = Task1_Weeding(navigation_controller, mapping)
    # command_weeding_weeding = Task1_Weeding(weeding_controller)
    # scheduler.store_command(command_weeding_navigation)
    # scheduler.store_command(command_weeding_weeding)
    print("[Main] All commands executed")
    # 4) Execute all commands in the dictionnary
    #scheduler.execute_commands()
    '''

if __name__ == '__main__':
    start()
| UTF-8 | Python | false | false | 2,381 | py | 37 | launchRobot.py | 30 | 0.730785 | 0.721966 | 0 | 52 | 44.807692 | 135 |
scmbuildrelease/gitfusionsrc | 14,027,363,204,181 | 34b4d33e0904cef8a3259978f4d49e4fb6dce24b | 292557f00ce989624fc62b5c236f1ecf9d9fb350 | /libexec/profiler_merge.py | a9bcfa003f34e7a8e9050b5b71deb0d05525edb2 | []
| no_license | https://github.com/scmbuildrelease/gitfusionsrc | eb2f4b92f193656ea827c693f0feb63f4d944859 | cf73a2d4facb41009db941a87429cb66ea821ada | refs/heads/master | 2020-04-01T22:12:30.631312 | 2018-10-18T22:50:52 | 2018-10-18T22:50:52 | 153,696,211 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python3.3
"""Merge two cProfiler runs into a tab-separated textfile you can read in
a spreadsheet.
"""
from collections import defaultdict
import copy
import logging
import re
import p4gf_util
# pylint: disable = line-too-long
# Some day we'll support Python 3.4 and get its shiny
# new Enum support. Until then, here have a fake
# replacement.
try:
    from enum import Enum
except ImportError:
    class Enum:
        """Gee golly I wish we had Python 3.4 Enum."""
        def __init__(self):
            pass
# Log level reminder from p4gf_util.apply_log_args():
# verbose = INFO
# <nothing> = WARNING
# quiet = ERROR
#
LOG = logging.getLogger("profiler_merge_stdout")
# Some of the sample line comments are riduiculously long.
# pylint: disable = line-too-long
# cProfiler output
#
# tottime = total time spent in function itself, not anything it calls
# cumtime = time spent in function or anything it called
#
# High cumtime is usually where to start drilling down to find suboptimal
# algorithms.
# High tottime + very high ncalls is likely a function called in some
# O(n^2) or worse loop. Walk up the call chain to find the suboptimality.
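#
# The parser below expects a text dump containing pstats-style print_stats()
# output followed by a caller -> callee section (the "->" lines). One way to
# produce a compatible dump (an assumption about the workflow, not something
# recorded in this script) is:
#
#   python3.3 -m cProfile -o run_a.prof some_script.py
#   python3.3 -c "import pstats; s = pstats.Stats('run_a.prof'); \
#       s.sort_stats('cumulative'); s.print_stats(100); s.print_callees()" > run_a.txt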
# -- main/mopdule-wide functions ---------------------------------------------
def main():
    """Do the thing."""
    args = _parse_argv()
    da = parse_file(args.file_a)
    db = parse_file(args.file_b)
    fa = da.sanitize_functions()
    fb = db.sanitize_functions()
    merged = MergedStats.from_stats(fa, fb)
    LOG.warning(str(merged))


def _parse_argv():
    """Convert command line into a usable dict."""
    parser = p4gf_util.create_arg_parser(
          add_log_args   = True
        , add_debug_arg  = True
        , desc           = "Merge two cProfiler runs into a single spreadsheet."
        )
    parser.add_argument('file_a', metavar='file_a')
    parser.add_argument('file_b', metavar='file_b')
    args = parser.parse_args()
    p4gf_util.apply_log_args(args, LOG)
    LOG.debug("args={}".format(args))
    return args


def parse_file(filename):
    """Read a line into a parsed XXX."""

    class State(Enum):
        """Where are we within the file?"""
        BEFORE_SUMMARY = "pre summary"
        IN_SUMMARY = "in summary"
        IN_CALLERS = "in callers"

    current_caller = None
    with open(filename, "r", encoding="utf-8") as fin:
        state = State.BEFORE_SUMMARY
        stats = Stats()
        for line in fin:
            if state is State.BEFORE_SUMMARY:
                if SummaryLine.START_PICKET_RE.match(line):
                    state = State.IN_SUMMARY
                    LOG.debug("sl: " + SummaryLine.HEADER)
                continue
            elif state is State.IN_SUMMARY:
                sl = SummaryLine.from_line(line)
                if sl:
                    LOG.debug("sl: " + str(sl))
                    stats.add_summary_line(sl)
                    continue
                elif CallerLine.START_PICKET_RE.match(line):
                    state = State.IN_CALLERS
                    continue
            elif state is State.IN_CALLERS:
                caller_line = CallerLine.from_line(line)
                if caller_line:
                    current_caller = caller_line.caller
                    LOG.debug("cl: " + str(caller_line))
                    LOG.debug("cl: " + CalleeLine.HEADER)
                    line = caller_line.remainder
                    # intentional fall through to process
                    # remainder as callee
                callee_line = CalleeLine.from_line(line)
                if callee_line:
                    stats.add_callee_line(current_caller, callee_line)
                    LOG.debug("cl: " + str(callee_line))
    return stats


def _ncalls(s):
    """Un-split any "recursive calls/primitive calls" slashed ncalls values."""
    if "/" in s:
        (recursive_ncalls, primitive_ncalls) = s.split("/")
        return int(recursive_ncalls) + int(primitive_ncalls)
    return int(s)
def _sanitize_package(orig):
    """Shorten ridiculously long package paths."""
    line = orig

    # python3.3 packages (pygit2, P4Python)
    # Do these BEFORE general python3.3, since python3.3 prefix would match site-packages prefix
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:569(run)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:749(__flatten)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:877(insert)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pygit2/repository.py:58(__init__)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pygit2/repository.py:71(_common_init)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pytz/__init__.py:245(__str__)
    python_packages_re = re.compile(r'.*/site-packages/(.*)')
    m = python_packages_re.match(orig)
    if m:
        line = m.group(1)
        package_module_re = re.compile(r'([^/]+)/(.*)')
        m = package_module_re.match(line)
        if m:
            package = m.group(1)
            module = m.group(2)
            for p in ['p4python', 'pygit2']:
                if p in package:
                    package = p
            line = package + "/" + module
        return line

    # python3.3 library:
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/multiprocessing/synchronize.py:296(is_set)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/os.py:671(__getitem__)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/os.py:694(__iter__)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/re.py:158(search)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/re.py:212(compile)
    # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/tempfile.py:386(__del__)
    python33_re = re.compile(r'.*/python3.3/(.*)')
    m = python33_re.match(line)
    if m:
        line = m.group(1)
        return line

    # Git Fusion
    # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_atomic_lock.py:177(update_all_gf_reviews)
    # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_atomic_lock.py:202(update_repo_reviews)
    # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_util_p4run_logged.py:49(_log_p4_request)
    # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_util_p4run_logged.py:55(_log_p4_results)
    git_fusion_re = re.compile(r'.*/(p4gf_[^/]+)')
    m = git_fusion_re.match(line)
    if m:
        line = m.group(1)
        return line

    # Built-in (leave unchanged)
    # {built-in method chdir}
    # {built-in method discover_repository}
    # {built-in method getcwd}
    # {built-in method getfilesystemencoding}
    # {built-in method hasattr}
    # {built-in method isinstance}
    # {built-in method len}
    # {built-in method max}
    # {built-in method poll}
    # {built-in method proxy}
    # {built-in method sorted}
    # {built-in method time}
    # {method 'acquire' of '_multiprocessing.SemLock' objects}
    # {method 'add' of 'set' objects}
    # {method 'append' of 'collections.deque' objects}
    # {method 'append' of 'list' objects}
    # {method 'as_array' of 'P4API.P4Map' objects}
    # {method 'decode' of 'bytes' objects}
    return line


def _sanitize_line_num(orig):
    """Strip any line numbers."""
    # Don't strip list comprehension line numbers.
    # They can sometimes be significant.
    for c in ["<listcomp>", "<dictcomp>"]:
        if c in orig:
            return orig
    line_num_re = re.compile(r':\d+')
    m = line_num_re.search(orig)
    if not m:
        return orig
    return orig[:m.start()] + orig[m.end():]


def _sanitize_function(orig):
    """cProfiler function names include long file paths and line numbers.
    Shorten file paths. Remove line numbers: they change between test runs.
    """
    line = _sanitize_package(orig)
    line = _sanitize_line_num(line)
    return line


def _cell(ab, side, attr):
    """Return a formatted cell."""
    if not getattr(ab, side):
        return " "
    v = getattr(getattr(ab, side), attr)
    if isinstance(v, int):
        return "{:>6d}".format(v)
    elif isinstance(v, float):
        return "{:>7.3f}".format(v)
    else:
        return str(v)
# -- end module-wide ---------------------------------------------------------
class Stats(object):
    """One file's stats, parsed."""
    def __init__(self):
        # list of SummaryLine
        self.summary_lines = []
        # dict[str(caller)] ==> CalleeLine
        self.callee = defaultdict(list)

    def add_summary_line(self, summary_line):
        """Record one SummaryLine."""
        self.summary_lines.append(summary_line)

    def add_callee_line(self, caller, callee_line):
        """Record one function's call to another."""
        self.callee[caller].append(callee_line)

    def sanitize_functions(self):
        """Reduce long function names into something small.
        Strip line numbers: they won't match across different test runs.
        Return a NEW Stats instance built out of the sanitized names.
        """
        r = Stats()
        for sl in self.summary_lines:
            sl2 = copy.copy(sl)
            sl2.function = _sanitize_function(sl.function)
            r.summary_lines.append(sl2)
        for k, v in self.callee.items():
            k2 = _sanitize_function(k)
            v2 = []
            for cl in v:
                cl2 = copy.copy(cl)
                cl2.function = _sanitize_function(cl.function)
                v2.append(cl2)
            r.callee[k2] = v2
        return r

    def __str__(self):
        l = []
        l.append(SummaryLine.HEADER)
        l.extend([str(sl) for sl in self.summary_lines])
        for k in sorted(self.callee.keys()):
            l.append(k)
            v = self.callee[k]
            l.append(CalleeLine.HEADER)
            l.extend([str(cl) for cl in v])
        return "\n".join(l)
# -- end Stats ---------------------------------------------------------------
class Side(Enum):
    """File A or File B?"""
    # pylint:disable=invalid-name
    A = "a"
    B = "b"


class MergedCalleeDict(defaultdict):
    """One of these for each caller in MergedStats."""
    def __init__(self):
        defaultdict.__init__(self, AB)
class MergedStats(object):
    """Two file's stats, merged together."""
    def __init__(self):
        # Both data members are dicts:
        # * key = (sanitized) function name
        # * val = AB with value or None for the A and B sides.
        self.summary_lines = defaultdict(AB)
        self.callee = defaultdict(MergedCalleeDict)

    def add_summary_line(self, side, summary_line):
        """Record either a's or b's summary line."""
        v = self.summary_lines[summary_line.function]
        setattr(v, side, summary_line)

    def add_callee_line(self, side, caller, callee_line):
        """Record either a's or b's caller line."""
        mcd = self.callee[caller]
        v = mcd[callee_line.function]
        LOG.debug("add_callee_line side={} caller='{:<30}' callee_line={}".format(side, caller, callee_line))
        setattr(v, side, callee_line)

    @staticmethod
    def from_stats(a, b):
        """Merge two Stats into one MergedStats."""
        r = MergedStats()
        r.add_side(Side.A, a)
        r.add_side(Side.B, b)
        return r

    def add_side(self, side, stats):
        """Add either A or B stats."""
        for sl in stats.summary_lines:
            self.add_summary_line(side, sl)
        for caller, callee_lines in stats.callee.items():
            for cl in callee_lines:
                self.add_callee_line(side, caller, cl)

    def _all_functions_sort_key(self):
        """Assist output sorting by building a dict of function name to
        file_a's summary cumtime value.
        Anything not in file_a's summary gets a value of 0.
        """
        r = {}
        for function, ab in self.summary_lines.items():
            if not ab.a:
                r[function] = 0.0
            else:
                r[function] = ab.a.cumtime
            LOG.debug("afskey: {:>7.3f} {}".format(r[function], function))
        return r

    def all_functions(self):
        """Return a list of all functions seen anywhere, in canonical order"""
        summary = set(self.summary_lines.keys())
        afskey = self._all_functions_sort_key()
        sum_sorted = sorted(list(summary), key=afskey.get, reverse=True)
        callers = set(self.callee.keys())
        callees = set()
        for mcd in self.callee.values():
            callees = set.union(callees, set(mcd.keys()))
        all_set = set.union(summary, callers, callees)
        not_sum_set = all_set - summary
        not_sum_sorted = sorted(list(not_sum_set))
        return sum_sorted + not_sum_sorted

    def __str__(self):
        # Find the widest function name and format accordingly.
        sl = self._report_summary_lines()
        cl = self._report_callee_lines()
        return "\n".join(sl + ["\n"] + cl)

    @staticmethod
    def _format_summary(func_width):
        """Return a format string for the summary lines at the top.
        Format specifiers for numeric items are left as STRINGS so that they
        can handle blank cells.
        """
        return ( "{function:<" + str(func_width) + "}"
                 "\t{ncalls_a:>8}"
                 "\t{ncalls_b:>8}"
                 "\t{cumtime_a:>9}"
                 "\t{cumtime_b:>9}"
               )

    def _report_summary_lines(self):
        """Return list of lines for "summary" section. Includes header."""
        func_order = self.all_functions()
        func_width = max(len(f) for f in func_order if f in self.summary_lines)
        lines = []
        fmt = self._format_summary(func_width)
        header = fmt.format(
              function  = "function"
            , ncalls_a  = "ncalls_a"
            , ncalls_b  = "ncalls_b"
            , cumtime_a = "cumtime_a"
            , cumtime_b = "cumtime_b"
            )
        lines.append(header)
        for func in func_order:
            ab = self.summary_lines.get(func)
            if not ab:
                continue
            line = fmt.format(
                  function  = func
                , ncalls_a  = _cell(ab, Side.A, "ncalls")
                , ncalls_b  = _cell(ab, Side.B, "ncalls")
                , cumtime_a = _cell(ab, Side.A, "cumtime")
                , cumtime_b = _cell(ab, Side.B, "cumtime")
                )
            lines.append(line)
        return lines

    def _report_callee_lines(self):
        """Return list lines for "caller/callee" section. Includes headers."""
        func_order = self.all_functions()
        func_width = max(len(f) for f in func_order)
        lines = []
        fmt = self._format_callee(func_width + 3)
        for caller_func in func_order:
            mcd = self.callee.get(caller_func)
            if not mcd:
                LOG.debug("report_callee: SKIP {}".format(caller_func))
                continue
            LOG.debug("report_callee: {}".format(caller_func))
            header = fmt.format(
                  function  = "-- " + caller_func
                , ncalls_a  = "ncalls_a"
                , ncalls_b  = "ncalls_b"
                , cumtime_a = "cumtime_a"
                , cumtime_b = "cumtime_b"
                )
            lines.append(header)
            for callee_func in func_order:    # Ayep, O(n^2)
                ab = mcd.get(callee_func)
                if not ab:
                    continue
                line = fmt.format(
                      function  = " " + callee_func
                    , ncalls_a  = _cell(ab, Side.A, "ncalls")
                    , ncalls_b  = _cell(ab, Side.B, "ncalls")
                    , cumtime_a = _cell(ab, Side.A, "cumtime")
                    , cumtime_b = _cell(ab, Side.B, "cumtime")
                    )
                lines.append(line)
        return lines

    @staticmethod
    def _format_callee(func_width):
        """Return a format string for the callee lines.
        Format specifiers for numeric items are left as STRINGS so that they
        can handle blank cells.
        """
        # Unintentional, but both the summary and callee lines
        # contain the same layout of cells. Keeping the function
        # calls separate in case I ever need to change 'em.
        return MergedStats._format_summary(func_width)
# -- end MergedStats ---------------------------------------------------------
class AB(object):
    """Mutable tuple to carry two file's record of some row."""
    def __init__(self):
        # pylint:disable=invalid-name
        self.a = None
        self.b = None
# -- end AB ------------------------------------------------------------------
class SummaryLine(object):
    """One line in the summary dump of the top 100 function calls:
    ncalls  tottime  percall  cumtime  percall filename:lineno(function)
         1    0.000    0.000  431.769  431.769 /Users/zig/Dropbox/git-fusion-main/bin/p4gf_auth_server.py:291(main_ignores)
       5/1    0.000    0.000  431.769  431.769 /Users/zig/Dropbox/git-fusion-main/bin/p4gf_profiler.py:66(wrapper)
     16001    0.317    0.000  163.234    0.010 /Users/zig/Dropbox/git-fusion-main/bin/p4gf_branch.py:303(add_to_config)
    """
    #                        1:nc       2:tot       3:per       4:cume      5:per     6:func
    REGEX = re.compile(r'\s*([\d/]+)\s+([\d\.]+)\s+([\d\.]+)\s+([\d\.]+)\s+([\d\.]+)\s+(.*)')
    START_PICKET_RE = re.compile(r'\s+ncalls\s+tottime\s+percall\s+cumtime\s+percall\s+filename:lineno\(function\)')

    FORMAT = ( "{ncalls:>6d}"
               " {tottime:>7.3f}"
               " {tottime_percall:>7.3f}"
               " {cumtime:>7.3f}"
               " {cumtime_percall:>7.3f}"
               " {function}" )

    HEADER = ( "{ncalls:>6}"
               " {tottime:>7}"
               " {tottime_percall:>7}"
               " {cumtime:>7}"
               " {cumtime_percall:>7}"
               " {function}").format(
                  ncalls          = "ncalls"
                , tottime         = "tottime"
                , tottime_percall = "percall"
                , cumtime         = "cumtime"
                , cumtime_percall = "percall"
                , function        = "function"
                )

    def __init__(self, *
                 , ncalls
                 , tottime
                 , tottime_percall
                 , cumtime
                 , cumtime_percall
                 , function
                 ):
        self.ncalls          = int(ncalls)
        self.tottime         = float(tottime)
        self.tottime_percall = float(tottime_percall)
        self.cumtime         = float(cumtime)
        self.cumtime_percall = float(cumtime_percall)
        self.function        = str(function).strip()

    @staticmethod
    def from_line(line):
        """Return a new SummaryLine that contains the parsed data from line.
        Return None if line not a summary line.
        """
        m = SummaryLine.REGEX.match(line)
        if not m:
            return None
        return SummaryLine(
              ncalls          = _ncalls(m.group(1))
            , tottime         = m.group(2)
            , tottime_percall = m.group(3)
            , cumtime         = m.group(4)
            , cumtime_percall = m.group(5)
            , function        = m.group(6)
            )

    def __str__(self):
        return SummaryLine.FORMAT.format(
              ncalls          = self.ncalls
            , tottime         = self.tottime
            , tottime_percall = self.tottime_percall
            , cumtime         = self.cumtime
            , cumtime_percall = self.cumtime_percall
            , function        = self.function
            )
# -- end SummaryLine ---------------------------------------------------------
class CallerLine(object):
    """The first line in a "what functions were called by this function" dump.
    Includes one called function's data, too.
    """
    START_PICKET_RE = re.compile(r'\s+ncalls\s+tottime\s+cumtime')
    REGEX = re.compile(r'(.*)\s+\-\> (.*)')

    def __init__( self, *
                , caller
                , remainder ):
        self.caller = caller.strip()
        self.remainder = remainder

    @staticmethod
    def from_line(line):
        """parse"""
        m = CallerLine.REGEX.match(line)
        if not m:
            return None
        return CallerLine(caller = m.group(1), remainder = m.group(2))

    def __str__(self):
        return self.caller
# -- end CallerLine ----------------------------------------------------------
class CalleeLine(object):
    """A line in the "what functions were called by a function?" dump."""
    # ncalls  tottime  cumtime
    #      1    0.000    0.044  /Users/zig/Dropbox/git-fusion-main/bin/p4gf_copy_to_git.py:125(all_branches)
    #                       1:ncalls   2:tot       3:cume     4:callee
    REGEX = re.compile(r'\s+([\d/]+)\s+([\d\.]+)\s+([\d\.]+)\s+(.*)')

    FORMAT = ( "{ncalls:>6d}"
               " {tottime:>7.3f}"
               " {cumtime:>7.3f}"
               " {function}" )

    HEADER = ( "{ncalls:>6}"
               " {tottime:>7}"
               " {cumtime:>7}"
               " {function}" ).format(
                  ncalls   = "ncalls"
                , tottime  = "tottime"
                , cumtime  = "cumtime"
                , function = "function"
                )

    def __init__( self, *
                , ncalls
                , tottime
                , cumtime
                , function
                ):
        self.ncalls = int(ncalls)
        self.tottime = float(tottime)
        self.cumtime = float(cumtime)
        self.function = str(function).strip()

    @staticmethod
    def from_line(line):
        """parse"""
        m = CalleeLine.REGEX.match(line)
        if not m:
            return None
        return CalleeLine(
              ncalls   = _ncalls(m.group(1))
            , tottime  = m.group(2)
            , cumtime  = m.group(3)
            , function = m.group(4)
            )

    def __str__(self):
        return CalleeLine.FORMAT.format(
              ncalls   = self.ncalls
            , tottime  = self.tottime
            , cumtime  = self.cumtime
            , function = self.function
            )
# -- end CalleeLine ----------------------------------------------------------
if __name__ == "__main__":
    main()
| UTF-8 | Python | false | false | 23,377 | py | 132 | profiler_merge.py | 131 | 0.526115 | 0.510673 | 0 | 635 | 35.814173 | 156 |
zeyuyuyu/HCI-Bake-off-2 | 4,037,269,271,960 | c4ca5b181702e30d2be5f7fb224462bae1203d84 | ff246438806c73e3b9e988dd01f92247ba426ffe | /209_bakeoff_2/gesture_source/imageClassification.py | 4485df2e3cf929584c12dd2de8832087f4c885e8 | []
| no_license | https://github.com/zeyuyuyu/HCI-Bake-off-2 | a42c6fb12f02b074e11cda1d9af79759218ed4e2 | ad35efd2d44a27cc60ca8101139c56e2eba2447e | refs/heads/master | 2020-09-30T06:08:31.154137 | 2019-12-10T00:10:39 | 2019-12-10T00:10:39 | 227,223,786 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
#MNIST dataset from keras
mnist = tf.keras.datasets.mnist
#tuples that have image in x and value in y.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#reduces to 0-1 from 0-255
#this may not have much of a change on the output
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
#Using sequential model with 2 layered neural network with each having 128 neurons
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(optimizer= 'adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3)
val_loss, val_acc = model.evaluate(x_test, y_test)
print(val_acc, val_loss)
| UTF-8 | Python | false | false | 949 | py | 5 | imageClassification.py | 2 | 0.72392 | 0.701791 | 0 | 29 | 31.758621 | 82 |
eric9687/Image_Classification_Competiton | 6,682,969,141,714 | 3765d7b634edd390fa6866189981b420e11d019f | 3c0b68400b224fab5af175c1670e6ad905cbe577 | /T2024/p1-image-classifier/train.py | 04bb81396d714a6302d553c7fb60a0b79bd2bdd9 | []
| no_license | https://github.com/eric9687/Image_Classification_Competiton | 7d3d0194cf1d4783b6c1654669b80ffcc6a3918a | cc6c9354550aa8979e18e214ffaeffe74c0789bf | refs/heads/main | 2023-07-18T05:23:13.646079 | 2021-09-04T08:06:29 | 2021-09-04T08:06:29 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Library
from ast import Str
from itertools import accumulate
import os
from logging import critical
import numpy as np
import pandas as pd
import random
from PIL import Image
from pandas.core.frame import DataFrame
from scipy.sparse.construct import rand
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils import data
from torch.utils.data import Dataset, DataLoader, random_split
from torch.cuda.amp import autocast_mode, grad_scaler
from torchvision import transforms
from torchvision.transforms import Resize, ToTensor, Normalize
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score
from adamp import AdamP
from tqdm import tqdm
# user library
import models.create_model
import dataset.custom_dataset
import losses.f1_loss
import losses.Focal_loss
import losses.Label_smoothing
# Warning ignore
import warnings
warnings.filterwarnings("ignore")
# Image warning ignore
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# weight and bias
import wandb
################################################################################
# fix the random seed for reproducibility
def set_seed(random_seed):
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    torch.cuda.manual_seed_all(random_seed) # if use multi-GPU
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(42)
# PyTorch version & Check using cuda
print ("PyTorch version:[%s]."%(torch.__version__))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') #'cuda:0'
print ("device:[%s]."%(device))
################################################################################
# data.csv, info.csv path
TRAIN_PATH = '/opt/ml/input/data/train/data.csv'
EVAL_PATH = '/opt/ml/input/data/eval'
PATH = '/opt/ml/input/data/checkpoint'
# target
TARGET = 'class'
# transforms
transform = transforms.Compose([
transforms.Resize((512,384), Image.BILINEAR),
transforms.ToTensor(),
transforms.RandomHorizontalFlip(0.5),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.2, 0.2, 0.2)),
])
# wandb init
# wandb.login()
config = {
'ALPHA' : None,
'batch_size' : 32,
'learning_rate' : 0.002,
}
# wandb.init(project='test-model',config=config)
# wandb.run.name = 'mlp_mixer'
# model name
MODEL_NAME = 'efficientnet_b4'
# epochs
NUM_EPOCHS = 20
NUM_CLASSES = 18
PRETRAINED = True
# CutMix to the left and right around the image center, Jekyll-and-Hyde style
def rand_bbox(size, lam):
    H = size[2]
    W = size[3]
    cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)

    cx = np.random.randn() + W//2
    cy = np.random.randn() + H//2

    # the four corner points of the patch
    bbx1 = np.clip(cx - cut_w // 2, 0, W//2)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W//2)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    return int(bbx1), int(bby1), int(bbx2), int(bby2)
################################################################################
df = pd.read_csv(TRAIN_PATH, index_col=0)
# model
from mlp_mixer_pytorch import MLPMixer
# train
torch.cuda.empty_cache() # empty cache
stratifiedKFold = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
print('train start')
for index, (train_idx, val_idx) in enumerate(stratifiedKFold.split(df['path'], df['class'])):
    if index == 0: continue
    print('{idx}th stratifiedKFold'.format(idx=index))
    df_train = pd.DataFrame(columns=['path','class'])
    df_val = pd.DataFrame(columns=['path','class'])

    for i in train_idx:
        df_train = df_train.append({'path':df['path'].loc[i], 'class':df['class'].loc[i]}, ignore_index=True)
    for i in val_idx:
        df_val = df_val.append({'path':df['path'].loc[i], 'class':df['class'].loc[i]}, ignore_index=True)

    train_set = dataset.custom_dataset.Custom_Dataset(df_train, target=TARGET, transforms=transform, train=True)
    val_set = dataset.custom_dataset.Custom_Dataset(df_val, target=TARGET, transforms=transform, train=True)

    train_loader = DataLoader(train_set, batch_size=32, num_workers=4)
    val_loader = DataLoader(val_set, batch_size=32, num_workers=4)

    model = models.create_model.ImgClassifierModel(model_name=MODEL_NAME, num_classes=NUM_CLASSES, pretrained=PRETRAINED)
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config['learning_rate'])

    for s in ['train', 'validation']:
        if s == 'train':
            model.train()
            scaler = grad_scaler.GradScaler()
            best_f1, best_epoch = 0., 0.
            stop_count = 0
            for epoch in range(NUM_EPOCHS):
                running_loss = 0.0
                running_acc = 0.0
                n_iter = 0
                epoch_f1 = 0.0
                for index, (images, labels) in enumerate(tqdm(train_loader)):
                    images = images.to(device)
                    target = labels.to(device)
                    optimizer.zero_grad()
                    if np.random.random() > 0.5: # Cutmix
                        random_index = torch.randperm(images.size()[0])
                        target_a = target
                        targeb_b = target[random_index]
                        lam = np.random.beta(1.0, 1.0)
                        bbx1, bby1, bbx2, bby2 = rand_bbox(images.size(), lam)
                        images[:, :, bbx1:bbx2, bby1:bby2] = images[random_index, :, bbx1:bbx2, bby1:bby2]
                        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (images.size()[-1] * images.size()[-2]))
                        with autocast_mode.autocast():
                            logits = model(images.float())
                            loss = criterion(logits, target_a) * lam + criterion(logits, targeb_b) * (1. - lam)
                            _, preds = torch.max(logits, 1)
                        scaler.scale(loss).backward()
                        scaler.step(optimizer)
                        scaler.update()
                    else:
                        with autocast_mode.autocast():
                            logits = model(images.float())
                            loss = criterion(logits, target)
                            _, preds = torch.max(logits, 1)
                        scaler.scale(loss).backward()
                        scaler.step(optimizer)
                        scaler.update()

                    running_loss += loss.item() * images.size(0)
                    running_acc += torch.sum(preds == target.data)
                    epoch_f1 += f1_score(target.cpu().numpy(), preds.cpu().numpy(), average='macro')
                    n_iter += 1

                epoch_loss = running_loss / len(train_loader.dataset)
                epoch_acc = running_acc / len(train_loader.dataset)
                epoch_f1 = epoch_f1 / n_iter

                print(f"epoch: {epoch} - Loss : {epoch_loss:.3f}, Accuracy : {epoch_acc:.3f}, F1-Score : {epoch_f1:.4f}")
                # wandb.log({'acc' : epoch_acc, 'f1 score' : epoch_f1, 'loss' : epoch_loss,})

                if epoch_f1 > best_f1:
                    stop_count = 0
                    dir = f'/opt/ml/input/data/checkpoint'
                    torch.save(model.state_dict(), f'{dir}/model_saved_{epoch}.pt')
                    best_f1 = epoch_f1
                    best_epoch = epoch
                else:
                    stop_count += 1
                    if stop_count > 2:
                        break
        else:
            load_dir = os.path.join(dir, 'model_saved_{best_epoch}.pt')
            model.load_state_dict(torch.load(load_dir))
            model.eval()
            with torch.no_grad():
                val_loss_items = []
                val_acc_items = []
                val_f1_score = 0.0
                v_iter = 0
                for val_batch in val_loader:
                    inputs, labels = val_batch
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    outs = model(inputs)
                    preds = torch.argmax(outs, dim=-1)
                    loss_item = criterion(outs, labels).item()
                    acc_item = (labels == preds).sum().item()
                    val_loss_items.append(loss_item)
                    val_acc_items.append(acc_item)
                    val_f1_score += f1_score(labels.cpu().numpy(), preds.cpu().numpy(), average='macro')
                    v_iter += 1

                val_loss = np.sum(val_loss_items) / len(val_loader)
                val_acc = np.sum(val_acc_items) / len(val_set)
                val_f1 = np.sum(val_f1_score) / v_iter

                print(f'val_acc : {val_acc:.3f}, val loss : {val_loss:.3f}, val f1 : {val_f1:.3f}')
                # wandb.log({'val_acc' : val_acc, 'val_loss' : val_loss, 'val_f1' : val_f1})

    del model, optimizer, val_loader, scaler
    torch.cuda.empty_cache()
################################################################################
| UTF-8 | Python | false | false | 9,204 | py | 88 | train.py | 50 | 0.534317 | 0.518142 | 0 | 281 | 31.558719 | 124 |
pradgap/MachineLearning | 5,446,018,538,795 | 72f513a266c1ce2132f0fcf7e3e4b81a780febc7 | a413fb6da5910da9a3202f99c9cc584dca7cf2f2 | /Spark/KMeansClustering.py | 8710eedfb8e61425fe67a27a723e8db33c025a64 | []
| no_license | https://github.com/pradgap/MachineLearning | b96a5770201f96edc11e62d3b3b36d62eb672e30 | 216042a01452c0198245e85d3dbb85267936dd65 | refs/heads/master | 2021-01-11T12:27:44.517074 | 2017-01-02T00:17:18 | 2017-01-02T00:17:18 | 76,627,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from numpy import array
from math import sqrt
from pyspark import SparkConf, SparkContext
from pyspark.mllib.clustering import KMeans, KMeansModel
def main(sc):
    rawRdd = sc.textFile('network_PCA_15Features2.csv')
    # print rdd.count()
    # rddTrain, rddTest= rdd.randomSplit([.8,.2])
    # print 'Done with random split'
    rdd = rawRdd.map(lambda line: parseData(line))
    parsedRdd = rdd.map(lambda line: line[0:-1])
    model = KMeans.train(parsedRdd, 7, maxIterations=10, runs=10, initializationMode='random')
    print model.centers
    error = parsedRdd.map(lambda line: WGSE(line, model)).reduce(lambda a, b: a + b)
    print 'The error for WGSE is ::'
    print error
    clusterCount = rdd.map(lambda line: cluster(line, model)).reduceByKey(lambda a, b: np.add(a, b))
    print clusterCount.collect()

def WGSE(line, model):
    predict = model.predict(line)
    center = model.centers[predict]
    return sqrt(sum([x**2 for x in (line - center)]))

def parseData(line):
    split = line.split(',')
    return array([float(x) for x in split])

def cluster(line, model):
    list = [0]*23
    list[int(float(line[-1]))] = 1
    predict = model.predict(line[0:-1])
    # clusterList[predict][line[-1]]=clusterList[predict][line[-1]]+1
    return (predict, list)

if __name__ == "__main__":
    conf = SparkConf().setAppName('sparkApp')
    conf = conf.setMaster("local[*]")
    sc = SparkContext(conf=conf)
    main(sc)
| UTF-8 | Python | false | false | 1,410 | py | 5 | KMeansClustering.py | 4 | 0.687943 | 0.671631 | 0 | 40 | 34.25 | 94 |
PLUSLEE/_Python_exercises | 7,447,473,315,135 | 1c422da4c943c00b6109c7c9415be6fe4ac7f7b1 | d064eb4007ed7c2600e5af7cd125999cab9d1946 | /Process-oriented Programming面向过程/_04_loop_code.py | 8524ff08cbe4ee00da256c517e3dd2e9028be263 | []
| no_license | https://github.com/PLUSLEE/_Python_exercises | 5361f5df425c1eae07bcd6a81864bb4c7d3ed1c3 | a74008b7f60bb114b94f4c51fe45f039c4aa915f | refs/heads/master | 2021-04-03T08:24:47.838485 | 2018-04-05T15:08:04 | 2018-04-05T15:08:04 | 124,406,076 | 1 | 0 | null | false | 2018-03-09T09:18:41 | 2018-03-08T14:52:56 | 2018-03-08T14:53:07 | 2018-03-09T09:17:46 | 7 | 0 | 0 | 1 | Python | false | null | #-*-coding:utf-8-*-
print(1+2+3)
# for...in loop: iterates over every element of a list or tuple in turn
classmate = ["Lee",'CFF','LZ']
for name in classmate:
    print(name)

# print the sum of 1-10
sum = 0
for x in [1,2,3,4,5,6,7,8,9,10]:
    sum = sum + x
print(sum)

# range() generates a sequence of numbers automatically; list() turns that sequence into a list
# the generated numbers stop before the end value, but include the starting 0
ls = list(range(10))
for x in ls:
    print(x)

# same approach: print the sum of 0-100
sum = 0
for x in range(101):
    sum = sum + x
print(sum)

#-------------------------------------------------------
# while loop: keeps repeating as long as the condition holds
# compute the sum of all odd numbers below 100
sum = 0
n = 99
while n > 0:
    sum = sum + n
    n -= 2
print(sum)

#-------------------------------------------------------
# uses of break and continue
'''
break stops the loop immediately
continue skips the rest of the current iteration and goes straight to the next one
'''
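# a quick demonstration of both keywords: continue skips even numbers,
# break stops the loop once the running total passes 15
total = 0
for i in range(10):
    if i % 2 == 0:
        continue
    total = total + i
    if total > 15:
        break
print(total)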
# this is an infinite loop
while 1:
    print("Dead")
| UTF-8 | Python | false | false | 1,000 | py | 30 | _04_loop_code.py | 27 | 0.544959 | 0.491826 | 0 | 52 | 13.115385 | 56 |
MihaZelnik/meepleAPI | 10,411,000,754,422 | 32a9a624ce79e2163fcfe672407077fa539b3d0b | 3aca0a94e0b599f562362d6aad07e0a8589e0d43 | /meeple/apps/utils/analytics.py | 3bb16af9c6416dd027ea4bbb12f470251102b93a | [
"BSD-3-Clause"
]
| permissive | https://github.com/MihaZelnik/meepleAPI | ec0ae9ed0822c53fc3587247b773134b0786a023 | 60f364cd7959a91335167d7da674b35d80dcef49 | refs/heads/master | 2018-01-07T11:17:38.926135 | 2015-05-14T17:12:49 | 2015-05-14T17:12:49 | 34,412,845 | 1 | 1 | null | false | 2015-05-26T19:43:07 | 2015-04-22T20:02:29 | 2015-05-01T23:07:04 | 2015-05-26T19:43:07 | 1,921 | 1 | 1 | 1 | Python | null | null | from django.conf import settings
import keen
import logging
def keen_hit(type, resource, request):
    if not settings.KEEN_DEBUG:
        ip = request.META.get('REMOTE_ADDR')
        user_agent = request.META.get('HTTP_USER_AGENT')
        try:
            keen.add_event(
                "api_hit",
                {
                    "url": request.path,
                    "type": type,
                    "resource": resource,
                    "ip_address": ip,
                    "user_agent": user_agent
                }
            )
        except:
            logging.warning("Couldn't send event to keen")
| UTF-8 | Python | false | false | 629 | py | 68 | analytics.py | 41 | 0.467409 | 0.467409 | 0 | 23 | 26.347826 | 58 |
hitzjd/Balance-Simulate | 16,088,947,514,793 | 4952cf07c79cf23bcfb86e19e1302314aded4808 | 918b8b356abdaed27ee2dc1ad45503e32d8d8080 | /twisted/test/test_defgen.py | c5011bbbea19fe1132fc01079629d571aa2d0f39 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | https://github.com/hitzjd/Balance-Simulate | 683c7b424195131e4ec5691e930e0ed909631d0d | 22f06f34b0e4dbbf887f2075823dcdf4429e4b8e | refs/heads/master | 2020-03-12T09:51:50.704283 | 2018-04-22T11:27:18 | 2018-04-22T11:27:18 | 130,561,072 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import generators, nested_scopes
from twisted.internet import reactor
from twisted.trial import unittest, util
from twisted.internet.defer import waitForDeferred, deferredGenerator, Deferred
from twisted.internet import defer
def getThing():
    d = Deferred()
    reactor.callLater(0, d.callback, "hi")
    return d

def getOwie():
    d = Deferred()
    def CRAP():
        d.errback(ZeroDivisionError('OMG'))
    reactor.callLater(0, CRAP)
    return d

class DefGenTests(unittest.TestCase):
    def _genWoosh(self):
        x = waitForDeferred(getThing())
        yield x
        x = x.getResult()
        self.assertEquals(x, "hi")

        ow = waitForDeferred(getOwie())
        yield ow
        try:
            ow.getResult()
        except ZeroDivisionError, e:
            self.assertEquals(str(e), 'OMG')

        yield "WOOSH"
        return
    _genWoosh = deferredGenerator(_genWoosh)

    def testBasics(self):
        return self._genWoosh().addCallback(self.assertEqual, 'WOOSH')

    def testBuggyGen(self):
        def _genError():
            yield waitForDeferred(getThing())
            1/0
        _genError = deferredGenerator(_genError)

        return self.assertFailure(_genError(), ZeroDivisionError)

    def testNothing(self):
        def _genNothing():
            if 0: yield 1
        _genNothing = deferredGenerator(_genNothing)

        return _genNothing().addCallback(self.assertEqual, None)

    def testDeferredYielding(self):
        # See the comment _deferGenerator about d.callback(Deferred).
        def _genDeferred():
            yield getThing()
        _genDeferred = deferredGenerator(_genDeferred)

        return self.assertFailure(_genDeferred(), TypeError)

    def testHandledTerminalFailure(self):
        """
        Create a Deferred Generator which yields a Deferred which fails and
        handles the exception which results.  Assert that the Deferred
        Generator does not errback its Deferred.
        """
        class TerminalException(Exception):
            pass

        def _genFailure():
            x = waitForDeferred(defer.fail(TerminalException("Handled Terminal Failure")))
            yield x
            try:
                x.getResult()
            except TerminalException:
                pass
        _genFailure = deferredGenerator(_genFailure)
        return _genFailure().addCallback(self.assertEqual, None)

    def testHandledTerminalAsyncFailure(self):
        """
        Just like testHandledTerminalFailure, only with a Deferred which fires
        asynchronously with an error.
        """
        class TerminalException(Exception):
            pass

        d = defer.Deferred()
        def _genFailure():
            x = waitForDeferred(d)
            yield x
            try:
                x.getResult()
            except TerminalException:
                pass
        _genFailure = deferredGenerator(_genFailure)
        deferredGeneratorResultDeferred = _genFailure()
        d.errback(TerminalException("Handled Terminal Failure"))
        return deferredGeneratorResultDeferred.addCallback(
            self.assertEqual, None)

    def testStackUsage(self):
        # Make sure we don't blow the stack when yielding immediately
        # available values
        def _loop():
            for x in range(5000):
                # Test with yielding a deferred
                x = waitForDeferred(defer.succeed(1))
                yield x
                x = x.getResult()
            yield 0
        _loop = deferredGenerator(_loop)

        return _loop().addCallback(self.assertEqual, 0)

    def testStackUsage2(self):
        def _loop():
            for x in range(5000):
                # Test with yielding a random value
                yield 1
            yield 0
        _loop = deferredGenerator(_loop)

        return _loop().addCallback(self.assertEqual, 0)
| UTF-8 | Python | false | false | 3,906 | py | 20 | test_defgen.py | 17 | 0.602663 | 0.597286 | 0 | 136 | 27.713235 | 90 |
DamienOConnell/MIT-600.1x | 14,121,852,477,581 | f7307100ceb6d56234408aee7effd1b6fcdaa66e | 793de7bd510c0b6509f1413353d912bc8ef9bfb0 | /Week_1/turn_right.py | a9f8363183b462165d1f14c8f83ccd334c6f2067 | []
| no_license | https://github.com/DamienOConnell/MIT-600.1x | eb24490bb5148348d4b092db5a776a41ec1c6819 | 319d45bbbea991b9342c99874d8aad1dd6dc5d38 | refs/heads/master | 2020-06-06T03:37:19.904231 | 2019-12-15T04:40:17 | 2019-12-15T04:40:17 | 192,628,180 | 0 | 0 | null | false | 2019-10-21T09:25:54 | 2019-06-19T00:05:07 | 2019-06-19T00:12:07 | 2019-10-21T09:25:52 | 524 | 0 | 0 | 0 | Python | false | false | #!/usr/bin/env python3
x = input("You are in the Lost Forest, turn right, or left?")
while x == "right":
    x = input("You are in the Lost Forest, turn right, or left?")
print ("You have exited the Lost Forest!")
| UTF-8 | Python | false | false | 215 | py | 62 | turn_right.py | 57 | 0.660465 | 0.655814 | 0 | 6 | 34.833333 | 65 |
haru-mingshi052/Melanoma-Classification | 15,375,982,964,497 | 908fa0d56e78c72475f12c37b71e23c081189d51 | 87bd53e35056e738402c6d4210615f32e57c35ad | /pytorch/data_processing/dataset.py | b9298af01e663fe5cfaab0f9b33e73de3a54f618 | []
| no_license | https://github.com/haru-mingshi052/Melanoma-Classification | 5124b58d02d175f99c3caa197ce7bb32fa7f9759 | 7be4bd197b01b4fad54c3e8023e50547b0a9329f | refs/heads/master | 2023-04-08T12:51:52.781132 | 2021-04-19T12:47:16 | 2021-04-19T12:47:16 | 354,457,578 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import os
import cv2
from torch.utils.data import Dataset
"""
Function that creates the PyTorch dataset
"""
#================================
# Melanoma Dataset
#================================
class MelanomaDataset(Dataset):
    def __init__(self, df, imfolder, train, transforms, meta_features):
        self.df = df
        self.imfolder = imfolder
        self.transforms = transforms
        self.train = train
        self.meta_features = meta_features

    def __getitem__(self, index):
        im_path = os.path.join(self.imfolder, self.df.iloc[index]['image_name'] + '.jpg')
        x = cv2.imread(im_path)
        meta = np.array(self.df.iloc[index][self.meta_features].values, dtype = np.float32)

        if self.transforms:
            x = self.transforms(x)

        if self.train:
            y = self.df.iloc[index]['target']
            return (x, meta), y
        else:
            return (x, meta)

    def __len__(self):
        return len(self.df)
| UTF-8 | Python | false | false | 1,037 | py | 28 | dataset.py | 24 | 0.53221 | 0.528246 | 0 | 38 | 24.605263 | 91 |
ErlingLie/Text_prediction | 9,998,683,892,378 | 62621bb929aa5d2f15e41511d73393e834456794 | 79013dd2eaac23538c79ac4e91c0d6af5d3546f8 | /utils.py | d89e416442424e384a947f8b5e1315277c9289bf | [
"MIT"
]
| permissive | https://github.com/ErlingLie/Text_prediction | a9e677f23374abe26c6f63aa865da5cc03189eff | 2dd9b6f3a59037b8cbd56a5d0a7c4da63091e49b | refs/heads/master | 2021-04-04T14:24:58.509734 | 2020-03-19T14:03:30 | 2020-03-19T14:03:30 | 248,464,297 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import matplotlib.pyplot as plt
import numpy as np
import pathlib
np.random.seed(0)
torch.manual_seed(0)
# Allow torch/cudnn to optimize/analyze the input/output shape of convolutions
# To optimize forward/backward pass.
# This will increase model throughput for fixed input shape to the network
torch.backends.cudnn.benchmark = True
# Cudnn is not deterministic by default. Set this to True if you want
# to be sure to reproduce your results
torch.backends.cudnn.deterministic = True
def to_cuda(elements):
    """
    Transfers every object in elements to GPU VRAM if available.
    elements can be a object or list/tuple of objects
    """
    if torch.cuda.is_available():
        if type(elements) == tuple or type(elements) == list:
            return [x.cuda() for x in elements]
        return elements.cuda()
    return elements


def plot_loss(loss_dict: dict, label: str = None, fmt="-"):
    """
    Args:
        loss_dict: a dictionary where keys are the global step and values are the given loss / accuracy
        label: a string to use as label in plot legend
    """
    global_steps = list(loss_dict.keys())
    loss = list(loss_dict.values())
    plt.plot(global_steps, loss, fmt, label=label)


def save_checkpoint(state_dict: dict,
                    filepath: pathlib.Path,
                    is_best: bool,
                    max_keep: int = 1):
    """
    Saves state_dict to filepath. Deletes old checkpoints as time passes.
    If is_best is toggled, saves a checkpoint to best.ckpt
    """
    filepath.parent.mkdir(exist_ok=True, parents=True)
    list_path = filepath.parent.joinpath("latest_checkpoint")
    torch.save(state_dict, filepath)
    if is_best:
        torch.save(state_dict, filepath.parent.joinpath("best.ckpt"))
    previous_checkpoints = get_previous_checkpoints(filepath.parent)
    if filepath.name not in previous_checkpoints:
        previous_checkpoints = [filepath.name] + previous_checkpoints
    if len(previous_checkpoints) > max_keep:
        for ckpt in previous_checkpoints[max_keep:]:
            path = filepath.parent.joinpath(ckpt)
            if path.exists():
                path.unlink()
        previous_checkpoints = previous_checkpoints[:max_keep]
    with open(list_path, 'w') as fp:
        fp.write("\n".join(previous_checkpoints))


def get_previous_checkpoints(directory: pathlib.Path) -> list:
    assert directory.is_dir()
    list_path = directory.joinpath("latest_checkpoint")
    list_path.touch(exist_ok=True)
    with open(list_path) as fp:
        ckpt_list = fp.readlines()
    return [_.strip() for _ in ckpt_list]


def load_best_checkpoint(directory: pathlib.Path):
    filepath = directory.joinpath("best.ckpt")
    if not filepath.is_file():
        return None
    return torch.load(directory.joinpath("best.ckpt"))
| UTF-8 | Python | false | false | 2,901 | py | 11 | utils.py | 2 | 0.654257 | 0.653223 | 0 | 79 | 34.721519 | 103 |
felixnego/Test | 9,603,546,874,795 | 89656c93cd8a649e5fbcbb9d39436ee5dc01604d | 3b70ed98ed4a657fce1f3f8c9cacb077b8b89462 | /Desktop/coding/udemy_python/helloworld/iteration.py | a2d3b74b375062434eeb30a4b0f45a8778cad2f4 | []
| no_license | https://github.com/felixnego/Test | 24eed4ad1411c76987f3054144a9f56e39fea0de | 64bfaecc7c3eaade55b13f571f8c96a5bcc9690a | refs/heads/master | 2018-02-12T21:34:19.697229 | 2017-09-24T19:46:28 | 2017-09-24T19:46:28 | 24,300,407 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | string = "1234567890"
for char in string:
    print(char)
my_iterator = iter(string) # this is what the for loop is doing behind the scenes
print(my_iterator)
print(next(my_iterator)) # prints the first element of the string
print(next(my_iterator))
# the for loop creates the iterator and keeps using next until it reaches the end
# then stops the loop automatically
# for char in string IS THE SAME AS for char in iter(string)
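# spelled out by hand, the same loop looks roughly like this
# (illustrative sketch; the variable name is made up):
manual_iterator = iter(string)
while True:
    try:
        print(next(manual_iterator))
    except StopIteration:
        break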
# CHALLENGE: create a list of items
# then create an iterator using the iter function
# use a for loop to loop 'n' times, where n is the number of items in your list
# each time round the loop, use next() on your list to print the next item
# use the len() function rather than counting the number of items in the list
print()
lista_fel = ["spam", 3, 0, "test", "kylo ren", 12.65]
iterator = iter(lista_fel)
for item in range(0, len(lista_fel)):
    print(next(iterator))
# we don't normally use iterators unless we work with special kind of classes
| UTF-8 | Python | false | false | 1,001 | py | 57 | iteration.py | 54 | 0.723277 | 0.706294 | 0 | 30 | 32.366667 | 86 |
AgnieszkaFalenska/IMSnPars | 16,209,206,622,570 | 48efaa53be5fa5146b02e97dee16ba52b7552912 | 60377d9a9cae6d26e61cd5fa63148f20ed4852e5 | /imsnpars/tests/asswap_tests.py | da06968684259aa2c17c802a6922a9a7a06e2cdb | [
"Apache-2.0"
]
| permissive | https://github.com/AgnieszkaFalenska/IMSnPars | 726ae2cc21e6a3dd024924eaf4599c8a6fbcf240 | e2180db93309157b3e3929a5e5c14751df49aff9 | refs/heads/master | 2021-06-30T07:16:43.462206 | 2020-12-14T20:57:25 | 2020-12-14T20:57:25 | 201,456,673 | 5 | 5 | Apache-2.0 | false | 2020-12-14T20:57:26 | 2019-08-09T11:42:39 | 2020-12-02T22:00:43 | 2020-12-14T20:57:26 | 11,933 | 2 | 2 | 4 | Python | false | false | #!/usr/bin/env python
# coding=utf-8
'''
Created on 21.08.2017
@author: falensaa
'''
import unittest
from nparser.trans.tsystem.asswap import ArcStandardWithSwap as ASWS, MPCBuilder
from nparser.trans.tsystem.asswap import ArcStandardWithSwapEagerOracle, ArcStandardWithSwapLazyOracle
from nparser.trans.tsystem import oracle
from tests.examples import *
oneWordCorrect = [ ASWS.SHIFT, ASWS.RIGHTARC ]
msCollinsCorrect = [ ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.SHIFT, ASWS.RIGHTARC,
ASWS.SHIFT, ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.SHIFT, ASWS.LEFTARC, ASWS.SHIFT,
ASWS.RIGHTARC, ASWS.RIGHTARC, ASWS.RIGHTARC, ASWS.SHIFT, ASWS.RIGHTARC,
ASWS.RIGHTARC ]
hearingCorrect = [ ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.SHIFT, ASWS.SHIFT, ASWS.SHIFT, ASWS.SWAP, ASWS.SWAP, ASWS.SHIFT, ASWS.SHIFT, ASWS.SHIFT, ASWS.SWAP, ASWS.SWAP,
ASWS.SHIFT, ASWS.SHIFT, ASWS.SHIFT, ASWS.SWAP, ASWS.SWAP, ASWS.LEFTARC, ASWS.RIGHTARC, ASWS.RIGHTARC, ASWS.SHIFT, ASWS.LEFTARC, ASWS.SHIFT, ASWS.SHIFT, ASWS.RIGHTARC,
ASWS.RIGHTARC, ASWS.SHIFT, ASWS.RIGHTARC, ASWS.RIGHTARC]
afterCorrect = [ ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.SHIFT, ASWS.SHIFT, ASWS.SWAP, ASWS.SHIFT, ASWS.SHIFT, ASWS.SWAP, ASWS.LEFTARC, ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.RIGHTARC, ASWS.RIGHTARC, ASWS.RIGHTARC ]
class TestASWithSwapStaticOracle(unittest.TestCase):
    def testOneWordExample(self):
        """Example: oneWordExample"""
        system = ASWS()
        soracle = ArcStandardWithSwapEagerOracle(system)
        transitions = oracle.buildStaticCorrectTransitions(oneWordExample[1], system, soracle)
        self.assertEqual(oneWordCorrect, transitions)

    def testMsCollinsExample(self):
        """Example: msCollinsExample"""
        system = ASWS()
        soracle = ArcStandardWithSwapEagerOracle(system)
        transitions = oracle.buildStaticCorrectTransitions(msCollinsExample[1], system, soracle)
        self.assertEqual(msCollinsCorrect, transitions)

    def testHearingExample(self):
        """Example: hearingExample"""
        system = ASWS()
        soracle = ArcStandardWithSwapEagerOracle(system)
        transitions = oracle.buildStaticCorrectTransitions(hearingExample[1], system, soracle)
        self.assertEqual(hearingCorrect, transitions)

    def testAfterExample(self):
        """Example: afterExample"""
        system = ASWS()
        soracle = ArcStandardWithSwapEagerOracle(system)
        transitions = oracle.buildStaticCorrectTransitions(afterExample[1], system, soracle)
        self.assertEqual(afterCorrect, transitions)


class TestASWithSwapLazyStaticOracle(unittest.TestCase):

    def testAfterExample(self):
        """Example: afterExample"""
        afterLazyCorrect = [ ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.SHIFT, ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.SWAP, ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.RIGHTARC, ASWS.RIGHTARC, ASWS.RIGHTARC ]

        system = ASWS()
        soracle = ArcStandardWithSwapLazyOracle(system)
        transitions = oracle.buildStaticCorrectTransitions(afterExample[1], system, soracle)
        self.assertEqual(afterLazyCorrect, transitions)

    def testLetterExample(self):
        """Example: letterExample"""
        system = ASWS()
        lazyoracle = ArcStandardWithSwapLazyOracle(system)

        letterCorrect = [ASWS.SHIFT, ASWS.SHIFT, ASWS.SHIFT, ASWS.RIGHTARC, ASWS.SWAP, ASWS.SHIFT, ASWS.SHIFT,
                         ASWS.SHIFT, ASWS.SHIFT, ASWS.LEFTARC, ASWS.RIGHTARC, ASWS.SWAP, ASWS.SHIFT, ASWS.SHIFT,
                         ASWS.LEFTARC, ASWS.RIGHTARC, ASWS.RIGHTARC, ASWS.SHIFT, ASWS.RIGHTARC, ASWS.RIGHTARC]

        lazyTransitions = oracle.buildStaticCorrectTransitions(letterExample[1], system, lazyoracle)
        self.assertEqual(lazyTransitions, letterCorrect)

    def testMPC_letter(self):
        """basic mpc test for letterExample"""
        tree = letterExample[1]
        self.assertFalse(tree.isProjective())

        builder = MPCBuilder()
        mpcs = builder.buildMPCs(tree)

        self.assertEqual(mpcs[1], mpcs[2])
        self.assertEqual(mpcs[3], mpcs[4])
        self.assertEqual(mpcs[5], mpcs[4])
        self.assertEqual(mpcs[3], mpcs[5])

        self.assertNotEqual(mpcs[0], mpcs[1])
        self.assertNotEqual(mpcs[2], mpcs[3])
        self.assertNotEqual(mpcs[5], mpcs[6])
        self.assertNotEqual(mpcs[7], mpcs[6])

    def testMPC_2(self):
        """basic mpc test"""
        tree = datatypes.Tree([1, -1, 5, 4, 1, 4])
        self.assertFalse(tree.isProjective())

        builder = MPCBuilder()
        mpcs = builder.buildMPCs(tree)
        self.assertCountEqual({1: 1, 2: 2, 4: 4, 5: 5, 0: 1, 3: 4}, mpcs)


if __name__ == "__main__":
    unittest.main()
| UTF-8 | Python | false | false | 5,042 | py | 63 | asswap_tests.py | 55 | 0.650337 | 0.640222 | 0 | 124 | 39.580645 | 222 |
ankuraut/ANKIT_HOTEL_YOYO | 8,443,905,751,236 | 160d05bb8a9228838ffe0e6667ea135dd3ddb34f | 3b67c6507b8f532cfdce77b03244b5ff0e518a5a | /HOTEL/yoyo/views.py | 2d301058c79560cc1e01300f2ab659620f24c9a2 | []
| no_license | https://github.com/ankuraut/ANKIT_HOTEL_YOYO | d6aac1575500c4804170b280c271d6706b997da9 | a66522d36649c01a12dad63e8df0918ac897c9a3 | refs/heads/master | 2023-06-29T04:59:09.410355 | 2021-07-17T05:40:25 | 2021-07-17T05:40:25 | 294,930,636 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render , HttpResponse
from django.views.generic import ListView, FormView, View #pre-defined
from .models import Room, Booking
from .forms import AvailableForm
from yoyo.functions.available import check_availability
from django.urls import reverse
# Create your views here.
class RoomListView(request):
room = Room.objects.all()[0]
room_typess = dict(room.ROOM_TYPES)
#print('typess=', room_typess)
room_values = room_typess.values()
#print('typess=', room_values)
room_list = []
for room_type in room_typess:
room = room_typess.get(room_type)
room_url = reverse('yoyo:RoomDetailView', kwargs={'types': room_type})
room_list.append((room, room_url))
context = {
"room_list": room_list,
}
return render(request, 'room_list_view.html', context)
class BookingList(ListView):
model = Booking
template_name = "booking_list_view.html"
def get_queryset(self, *args, **kwargs):
if self.request.user.is_staff:
booking_list = Booking.objects.all()
return booking_list
else:
booking_list = Booking.objects.filter(user=self.request.user)
return booking_list
class RoomDetailView(View):
def get(self, request, *args, **kwargs):
types = self.kwargs.get('types',None)
form = AvailableForm()
room_list = Room.objects.filter(types=types)
if len(room_list) > 0:
room = room_list[0]
            room_type = dict(room.ROOM_TYPES).get(room.types, None)
context = {
'room_type': room_type,
'form' : form,
}
return render(request, 'room_detail_view.html', context)
else:
return HttpResponse(' SORRY FOR THIS CATEGORY')
def post(self, request, *args, **kwargs):
types = self.kwargs.get('types',None)
room_list = Room.objects.filter(types=types)
form = AvailableForm(request.POST)
if form.is_valid():
data = form.cleaned_data
available_rooms = []
for room in room_list:
if check_availability(room, data['check_in'], data['check_out']):
available_rooms.append(room)
if len(available_rooms)>0:
room = available_rooms[0]
booking = Booking.objects.create(
user = self.request.user,
room = room,
check_in = data['check_in'],
check_out = data['check_out']
)
booking.save()
return HttpResponse(booking)
else:
return HttpResponse(' SORRY FOR THIS')
# This one is only for admin purposes
class BookingView(FormView):
form_class = AvailableForm
template_name = 'available_form.html'
def form_valid(self, form):
data = form.cleaned_data
room_list = Room.objects.filter(types = data['room_type'])
available_rooms = []
for room in room_list:
if check_availability(room, data['check_in'], data['check_out']):
available_rooms.append(room)
if len(available_rooms)>0:
room = available_rooms[0]
booking = Booking.objects.create(
user = self.request.user,
room = room,
check_in = data['check_in'],
check_out = data['check_out']
)
booking.save()
return HttpResponse(booking)
else:
return HttpResponse(' SORRY FOR THIS')
| UTF-8 | Python | false | false | 3,606 | py | 18 | views.py | 9 | 0.56711 | 0.565169 | 0 | 108 | 32.351852 | 78 |
dongjinhai/ssrmgmt | 18,305,150,631,955 | 11474607a6f3023afff2c74b6370d629350853f9 | e55a5f0234519686d551dd7b6e2a91123585dc7b | /ssrmgmt/settings/production.py | d58fc59d2e734261ab05ae803a537d4c82caf579 | [
"MIT"
]
| permissive | https://github.com/dongjinhai/ssrmgmt | 51ea2ee4b19a81a51c5df96db2b2858b5aeb6065 | a41e595aec503dcb191a20ea8d58233bbb8f2db0 | refs/heads/master | 2021-10-08T10:13:25.312650 | 2018-11-19T01:22:44 | 2018-11-19T01:22:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ssrmgmt',
'USER': get_env_variable('SSRMGMT_MYSQL_USER'),
'PASSWORD': get_env_variable('SSRMGMT_MYSQL_PASSWORD'),
'HOST': 'db_mysql',
'PORT': '3306',
'OPTIONS': {
            # disable foreign key checks
"init_command": "SET foreign_key_checks = 0;",
},
}
}
# Configure Django to use Redis as the cache backend
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': 'redis://db_redis:6379',  # docker deployment
# 'LOCATION': 'redis://localhost:6379',
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
},
},
}
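
# Illustrative note (added example, not part of the original settings): with the
# Redis backend above, application code uses Django's normal cache API, e.g.
#
#   from django.core.cache import cache
#   cache.set("ssr:last_sync", "done", timeout=300)
#   cache.get("ssr:last_sync")
#
# The key name and timeout here are assumptions chosen only for the example.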
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'prostatic')
# Account activation domain
USER_DOMAIN = "www.djhmgr.top"
# API settings
API_USERNAME = get_env_variable('SSRMGMT_API_USERNAME')
API_PASSWORD = get_env_variable('SSRMGMT_API_PASSWORD')
# Email settings
EMAIL_HOST = "smtp.163.com"
EMAIL_HOST_USER = get_env_variable('SSRMGMT_EMAIL_USER')
EMAIL_HOST_PASSWORD = get_env_variable('SSRMGMT_EMAIL_PASSWORD')
EMAIL_USE_TLS = False
EMAIL_USE_SSL = True
EMAIL_PORT = 465
EMAIL_FROM = "ssrmgmt@163.com"
| UTF-8 | Python | false | false | 1,743 | py | 40 | production.py | 20 | 0.632047 | 0.611869 | 0 | 73 | 22.082192 | 65 |
abeja-inc/abeja-platform-cli | 18,605,798,366,803 | 2133aacc834490d7239dca7a6888b02b8cbd0073 | 28c9b66a45d17435c72fc3e97d77932ae20e9c9a | /abejacli/model/__init__.py | 239cc629d949e787b36f88330bc6863e3bf370fa | [
"Apache-2.0"
]
| permissive | https://github.com/abeja-inc/abeja-platform-cli | cf2fa497b435ebe598a13126635af481f8c77a80 | cd4909a7a4f7898524a543f541544260d0952571 | refs/heads/develop | 2023-09-01T11:01:38.450596 | 2023-07-18T02:46:03 | 2023-07-18T02:46:03 | 253,412,424 | 3 | 1 | Apache-2.0 | false | 2023-07-18T02:16:16 | 2020-04-06T06:22:32 | 2023-06-26T04:52:48 | 2023-07-18T02:16:14 | 369 | 3 | 0 | 3 | Python | false | false | import hashlib
def md5file(filename):
with open(filename, "rb") as f:
content = f.read()
return md5digest(content)
def md5digest(content: bytes) -> str:
return hashlib.md5(content).hexdigest()
| UTF-8 | Python | false | false | 217 | py | 89 | __init__.py | 78 | 0.668203 | 0.64977 | 0 | 11 | 18.727273 | 43 |
ctc316/algorithm-python | 8,040,178,791,282 | a31911ea86d44d61cc517f4a99207af7bb33fdf2 | 4111ca5a73a22174f189361bef654c3f91c3b7ed | /Lintcode/G_Practice/Tag_Hash/627. Longest Palindrome.py | 0a1e23fdb1ef3689d9879c25a6da7e69d8171140 | [
"MIT"
]
| permissive | https://github.com/ctc316/algorithm-python | 58b541b654509ecf4e9eb8deebfcbdf785699cc4 | ac4580d55e05e93e407c6156c9bb801808027d60 | refs/heads/master | 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
"""
@param s: a string which consists of lowercase or uppercase letters
@return: the length of the longest palindromes that can be built
"""
def longestPalindrome(self, s):
counts = {}
for ch in s:
if ch not in counts:
counts[ch] = 1
else:
counts[ch] += 1
odd = False
length = 0
for cnt in counts.values():
if cnt % 2 == 1:
odd = True
length += cnt - 1
else:
length += cnt
if odd:
length += 1
return length | UTF-8 | Python | false | false | 641 | py | 581 | 627. Longest Palindrome.py | 580 | 0.455538 | 0.444618 | 0 | 26 | 23.692308 | 71 |
SharmaSapan/self-organizing-feature-maps-clustering | 10,565,619,573,419 | fb0223881c232f2b7b5a43d1989518f674082817 | 0bcefa895e98f81a211ae46463569cb4e206d40a | /main.py | f2eaf30c413d4da454a4d84aa3365cf1021b18cd | []
| no_license | https://github.com/SharmaSapan/self-organizing-feature-maps-clustering | 33fffc4488cc781398a86a239606f6ed3ce38823 | 2021c6a6022193895f4d12c72499f09aae34f1dc | refs/heads/master | 2023-07-10T06:03:03.935236 | 2021-08-05T04:02:38 | 2021-08-05T04:02:38 | 392,794,625 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import random
import os
# pillow image handling library
from PIL import Image
import imageio # to create gif in part 1
import numpy as np # to show image in part 2
# Pixel class to store pixel object and its properties.
class Pixel:
x = 0 # x-coordinate of pixel
y = 0 # y-coordinate of pixel
# create pixel objects using rgb, if no values given then random colors are assigned
def __init__(self, *args):
# random pixel generation if no value during object initialization
if len(args) == 0:
self.r = random.random()
self.g = random.random()
self.b = random.random()
# rgb values given during call
if len(args) > 0:
self.r = args[0]
self.g = args[1]
self.b = args[2]
# to update rgb values of the object
def setNewRGB(self, new_r, new_g, new_b):
self.r = new_r
self.g = new_g
self.b = new_b
# find the euclidean distance between this pixel and the given pixel
def getEDistance(self, p):
distance = math.sqrt(((self.r - p.r)**2) + ((self.g - p.g)**2) + ((self.b - p.b)**2))
return distance
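# A quick illustrative check of the helper above (added example, not part of the
# original script); the two pixels here are assumptions chosen for the example:
#
#   >>> a = Pixel(0.0, 0.0, 0.0)
#   >>> b = Pixel(1.0, 0.0, 0.0)
#   >>> a.getEDistance(b)
#   1.0
#
# i.e. the distance is the plain Euclidean norm in RGB space, ranging from
# 0.0 (identical colour) up to sqrt(3) (black vs. white).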
# SOFM for colour organization
def color(map_width, map_height, arr, epochs, neigh_radius, learning_rate, folder, interval):
print("Color Organization started....")
for i in range(epochs):
lr = learning_rate*pow(math.exp(1), (-1*(i/epochs)))
neigh_size = math.ceil(neigh_radius*pow(math.exp(1), (-1*(i/epochs))))
input_pixel = Pixel() # random input space pixel which will be generated after each epoch
# pixel to find best matching unit initially r,g,b = 0 and x,y =0
BMU_pixel = Pixel(0, 0, 0)
BMU_distance = float('inf') # to calculate best distance between input pixel and BMU
#print(BMU_pixel.r,BMU_pixel.x,BMU_pixel.y)
for j in range(map_width):
for k in range(map_height):
if(BMU_distance > input_pixel.getEDistance(arr[j][k])): # if pixel distance from BMU is less, store pixel as the BMU
BMU_distance = input_pixel.getEDistance(arr[j][k])
BMU_pixel = arr[j][k]
BMU_pixel.x = j
BMU_pixel.y = k
# colour update at arr with best matching colour in input space by a factor of learning rate
        b_r = BMU_pixel.r + lr * (input_pixel.r-BMU_pixel.r) # neighbourhood multiplier not needed as we are updating at BMU
b_g = BMU_pixel.g + lr * (input_pixel.g-BMU_pixel.g)
b_b = BMU_pixel.b + lr * (input_pixel.b-BMU_pixel.b)
#print(arr[BMU_pixel.x][BMU_pixel.y].r)
arr[BMU_pixel.x][BMU_pixel.y].setNewRGB(b_r,b_g,b_b)
#print(arr[BMU_pixel.x][BMU_pixel.y].r, BMU_pixel.r,BMU_pixel.x,BMU_pixel.y)
#print(y)
# from lower quadrant to upward quadrant in circle to update height(y-axis)
for y in range(BMU_pixel.y-neigh_radius,BMU_pixel.y+neigh_radius):
# updating pixel values at width(x-axis) to the left from BMU according to radius
xl = BMU_pixel.x - 1
# get values left of current y-axis till neighbour radius
while (((xl-BMU_pixel.x)*(xl-BMU_pixel.x) + (y-BMU_pixel.y)*(y-BMU_pixel.y)) <= (neigh_radius*neigh_radius)):
if(xl<map_width and xl>=0 and y<map_height and y>=0):
neigh_multi = pow(math.exp(1),(-1*(((arr[xl][y].getEDistance(BMU_pixel))*(arr[xl][y].getEDistance(BMU_pixel)))/(2*neigh_size*neigh_size))))
rl = arr[xl][y].r + neigh_multi * lr * (input_pixel.r-arr[xl][y].r) # if BMU then the difference will be 0 making no change to the BMU pixel colour
gl = arr[xl][y].g + neigh_multi * lr * (input_pixel.g-arr[xl][y].g)
bl = arr[xl][y].b + neigh_multi * lr * (input_pixel.b-arr[xl][y].b)
                    arr[xl][y].setNewRGB(rl,gl,bl) # update colour of neighbours by a factor of the neighbourhood multiplier and learning rate
xl = xl-1
# updating pixel values at width to the right from BMU according to radius
xr = BMU_pixel.x
# get values right of current y-axis till neighbour radius
while (((xr-BMU_pixel.x)*(xr-BMU_pixel.x) + (y-BMU_pixel.y)*(y-BMU_pixel.y)) <= (neigh_radius*neigh_radius)):
if(xr<map_width and xr>=0 and y<map_height and y>=0):
neigh_multi = pow(math.exp(1),(-1*(((arr[xr][y].getEDistance(BMU_pixel))*(arr[xr][y].getEDistance(BMU_pixel)))/(2*neigh_size*neigh_size))))
rr = arr[xr][y].r + neigh_multi * lr * (input_pixel.r-arr[xr][y].r)
gr = arr[xr][y].g + neigh_multi * lr * (input_pixel.g-arr[xr][y].g)
br = arr[xr][y].b + neigh_multi * lr * (input_pixel.b-arr[xr][y].b)
arr[xr][y].setNewRGB(rr,gr,br)
#print(x,y,arr[x][y].r)
xr = xr+1
# to save image after user provided intervals
if i % interval == 0:
initial = Image.new('RGB', (map_width, map_height), "black")
pix = initial.load()
for row in range(map_width):
for col in range(map_height):
pix[row,col] = (int(arr[row][col].r*255),int(arr[row][col].g*255),int(arr[row][col].b*255))
saveloc = str(i+1) + "th_epoch.jpg"
initial.save(os.path.join(folder, saveloc))
print(i, "th epoch saved")
initial.save(os.path.join(folder, "final.jpg"))
class Unit:
x = 0 # x-coordinate of unit
y = 0 # y-coordinate of unit
# create single sofm unit objects randomly or using 0's, if no values given then random values are assigned
def __init__(self, frame_size):
# random weight generation during object initialization
self.sofm = random.sample(range(0, 256), frame_size) # 64 or 256 depending on frame
# to update sofm values of the object
def setNew(self, index, value):
self.sofm[index] = value
# find the euclidean distance between this unit and the given frame
def getEDistance(self, frame):
distance = 0
for index in range(len(frame)):
distance += ((self.sofm[index] - frame[index])**2)
sq = math.sqrt(distance)
return sq
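    # Note (added remark, not in the original code): random.sample draws
    # *without replacement*, so a fresh Unit holds as many *distinct* integers
    # from 0..255 as the constructor argument asks for (frame_size**2 in this
    # script). For example, Unit(4).sofm could be [17, 203, 88, 5] -- the
    # concrete values are illustrative assumptions only.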
def compression(epochs, learning_rate, neigh_radius, frames, frame_size, sofm_config, sofm_map):
for i in range(epochs):
lr = learning_rate*pow(math.exp(1), (-1*(i/epochs)))
neigh_size = math.ceil(neigh_radius*pow(math.exp(1), (-1*(i/epochs))))
input_frame = random.choice(frames)
BMU_frame = Unit(frame_size**2) # random at first but will have copy from network during BMU scan
BMU_distance = float('inf')
for map_row in range(sofm_config):
for map_col in range(sofm_config):
if(BMU_distance > (sofm_map[map_row][map_col]).getEDistance(input_frame)):
BMU_distance = sofm_map[map_row][map_col].getEDistance(input_frame)
BMU_frame = sofm_map[map_row][map_col]
BMU_frame.x = map_row
BMU_frame.y = map_col
for b_index in range(len(BMU_frame.sofm)):
new_weight = BMU_frame.sofm[b_index] + lr*(input_frame[b_index]-BMU_frame.sofm[b_index])
sofm_map[BMU_frame.x][BMU_frame.y].setNew(b_index, new_weight)
for y in range(BMU_frame.y-neigh_radius,BMU_frame.y+neigh_radius):
xl = BMU_frame.x - 1
# get values left of current y-axis till neighbour radius
while (((xl-BMU_frame.x)*(xl-BMU_frame.x) + (y-BMU_frame.y)*(y-BMU_frame.y)) <= (neigh_radius*neigh_radius)):
if(xl<sofm_config and xl>=0 and y<sofm_config and y>=0):
neigh_multi = pow(math.exp(1),(-1*(((sofm_map[xl][y].getEDistance(BMU_frame.sofm))*(sofm_map[xl][y].getEDistance(BMU_frame.sofm)))/(2*neigh_size*neigh_size))))
for xl_index in range(len(BMU_frame.sofm)):
new_n_weight = sofm_map[xl][y].sofm[xl_index] + neigh_multi*lr*(input_frame[xl_index]-sofm_map[xl][y].sofm[xl_index])
sofm_map[xl][y].setNew(xl_index, new_n_weight)
xl = xl-1
xr = BMU_frame.x
# get values right of current y-axis till neighbour radius arc
while (((xr-BMU_frame.x)*(xr-BMU_frame.x) + (y-BMU_frame.y)*(y-BMU_frame.y)) <= (neigh_radius*neigh_radius)):
if(xr<sofm_config and xr>=0 and y<sofm_config and y>=0):
neigh_multi = pow(math.exp(1),(-1*(((sofm_map[xr][y].getEDistance(BMU_frame.sofm))*(sofm_map[xr][y].getEDistance(BMU_frame.sofm)))/(2*neigh_size*neigh_size))))
for xr_index in range(len(BMU_frame.sofm)):
new_n_weight = sofm_map[xr][y].sofm[xr_index] + neigh_multi*lr*(input_frame[xr_index]-sofm_map[xr][y].sofm[xr_index])
sofm_map[xr][y].setNew(xr_index, new_n_weight)
xr = xr+1
def main():
part = int(input("Enter 1 for part-1 Colour, Enter 2 for part-2 Compression: "))
if part == 1:
map_width = int(input("Enter N width for grid (ideally 200-800): "))
map_height = int(input("Enter N height for grid: "))
arr = []
initial = Image.new('RGB', (map_width, map_height), "black") # to create initial image
pix = initial.load()
# initialize a 2-D list of Pixel objects(should have used a better way, I know)
for width in range(map_width):
col = []
for height in range(map_height):
col.append(Pixel())
col[height].x = width
col[height].y = height
arr.append(col)
# to access values use arr[i][j].x (x for x-coordinate, y for y-coordinate, r,g,b for r,g,b random color respectively)
epochs = int(input("Enter number of epochs: "))
neigh_radius = int(input("Enter integer radius: "))
learning_rate = float(input("Enter learning rate: "))
folder = input("Enter folder name to store image: ")
interval = int(input("Enter integer interval for image save like 50 or 100 epochs: "))
os.mkdir(folder) # create folder with user provide name
for width_p in range(map_width): # paint image
for height_p in range(map_height):
pix[width_p,height_p] = (int(arr[width_p][height_p].r*255),int(arr[width_p][height_p].g*255),int(arr[width_p][height_p].b*255))
initial.save(os.path.join(folder,"1initial_input_space.jpg"))
color(map_width, map_height, arr, epochs, neigh_radius, learning_rate, folder, interval) # sofm engine
images = [] # store gif
gifloc = os.path.join(folder, 'colour_gif.gif')
for filename in os.listdir(folder):
images.append(imageio.imread(os.path.join(folder, filename)))
imageio.mimsave(gifloc, images)
print("End of run")
if part == 2:
frame_size = int(input("Enter frame size of 8 or 16: "))
sofm_config = int(input("Enter SOFM map config '16' for 16*16 or '4' for 4*4: "))
image_name = input("Enter name of image file with extension to run test on: ")
epochs = int(input("Enter number of epochs: "))
neigh_radius = int(input("Enter neighbour radius for SOFM: "))
learning_rate = float(input("Enter learning rate: "))
savfile = input("Enter name of the file to save binary data: ") + ".bin"
print("Running...")
im = Image.open(image_name).convert('L') # load image
im.save("gray-"+image_name)
imgwidth, imgheight = im.size
v = list(im.getdata())
frames = []
v_index = 0
for frame_index in range(int(len(v)/(frame_size**2))): # divide data by frame size**2
frame = []
for item_index in range(frame_size**2): # store per frame
frame.append(v[item_index+v_index])
v_index += (frame_size**2)
frames.append(frame)
sofm_map = [] # create a sofm map of sofm_config*sofm_config
for sofm_index in range(sofm_config):
sofm_col = []
for col_index in range(sofm_config):
sofm_col.append(Unit(frame_size**2)) # initialize vector units of sofm with random vectors of frame_size**2
sofm_map.append(sofm_col)
# training on the image
compression(epochs, learning_rate, neigh_radius, frames, frame_size, sofm_config, sofm_map)
trained_sofm_1D = [] # convert sofm map from 2D to 1D to map frames from image for reference in bin
for net_index in range(len(sofm_map)):
for net_col in range(len(sofm_map[0])):
trained_sofm_1D.append(sofm_map[net_index][net_col])
# reference frames which are best matching index of sofm unit to images from frame
frame_reference = []
for img_frame_index in range(len(frames)):
BMU_dist = float('inf')
best_index = 0
d = 0 # distance between frame and unit from sofm
for unit_index in range(len(trained_sofm_1D)):
d = trained_sofm_1D[unit_index].getEDistance(frames[img_frame_index])
if(BMU_dist > d):
BMU_dist = d
best_index = unit_index
frame_reference.append(best_index)
if sofm_config == 4:
# conversion to store two indices in one byte for sofm configuration of 4*4
frame_reference_nibble_list = []
for inde in range(0,len(frame_reference),2): # get 0 and 1, then 2 and 3rd index
                frame_reference_nibble = frame_reference[inde] # if this is the last index of an odd-length list, store it as-is
if inde!=len(frame_reference)-1:
frame_reference_nibble = (frame_reference[inde] << 4) | (frame_reference[inde+1])
frame_reference_nibble_list.append(frame_reference_nibble)
        # NOTE: the code can handle any image format, but each header field is stored as a
        # single byte (0-255), so the width/height in frames must fit in one byte; with
        # 8-pixel frames that means pictures up to 2040*2040. Please test with a picture in that range.
# Codebook format: Width of image(in frames), height of image(in frames), width of single frame,
# height of single frame, width of network, height of network, map result from sofm units in 1D,
# and indices of map which matches best to each frame of the image
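        # Worked example of this layout (illustrative assumption, not taken from a real
        # run): a 64x64 greyscale image with frame_size = 8 and a 16x16 SOFM gives a
        # header of [8, 8, 8, 8, 16, 16], followed by 16*16*64 = 16384 map bytes and
        # 8*8 = 64 index bytes, i.e. a 16454-byte codebook in total.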
width_image_in_frames_to_byte = [int(imgwidth/frame_size)] # binary file can only handle 2040*2040
height_image_in_frames_to_byte = [int(imgheight/frame_size)]
width_frame_to_byte = [frame_size]
height_frame_to_byte = [frame_size]
width_network_to_byte = [sofm_config]
height_network_to_byte = [sofm_config]
map_to_byte = [] # appending individual values of sofm unit in 1D
for sofm_codebook in trained_sofm_1D:
for val in sofm_codebook.sofm:
map_to_byte.append(int(val))
# data is stored on binary file name provided by user during runtime.
with open(savfile,"wb") as binary_file:
binary_file.write(bytes(width_image_in_frames_to_byte))
binary_file.write(bytes(height_image_in_frames_to_byte))
binary_file.write(bytes(width_frame_to_byte))
binary_file.write(bytes(height_frame_to_byte))
binary_file.write(bytes(width_network_to_byte))
binary_file.write(bytes(height_network_to_byte))
binary_file.write(bytes(map_to_byte)) # saving map to bytes file
if sofm_config == 4: # store one nibble per byte
binary_file.write(bytes(frame_reference_nibble_list)) # saving codebook indices, 2 indices per byte
else: # else if sofm_config 16 then store one per byte
binary_file.write(bytes(frame_reference)) # saving codebook indices
# reading data after storing
with open(savfile,"rb") as binary_file:
data = binary_file.read()
print("Codebook saved")
# converting binary to usable format.
convert_width_image_in_frames_to_byte = convert_height_image_in_frames_to_byte= convert_width_frame_to_byte= convert_height_frame_to_byte= convert_width_network_to_byte= convert_height_network_to_byte = 0
convert_sofm_map_values = []
convert_frame_reference = []
for i in range(len(data)):
if i == 0:
convert_width_image_in_frames_to_byte = data[i]
if i == 1:
convert_height_image_in_frames_to_byte = data[i]
if i == 2:
convert_width_frame_to_byte = data[i]
if i == 3:
convert_height_frame_to_byte = data[i]
if i == 4:
convert_width_network_to_byte = data[i]
if i == 5:
convert_height_network_to_byte = data[i]
# getting sofm map values
if i > 5 and i <= ((convert_width_network_to_byte*convert_height_network_to_byte*convert_width_frame_to_byte*convert_height_frame_to_byte)+5):
convert_sofm_map_values.append(data[i])
# getting codebook indices
if i > ((convert_width_network_to_byte *convert_height_network_to_byte*convert_width_frame_to_byte*convert_height_frame_to_byte)+5):
if sofm_config == 4:
convert_frame_reference.append((data[i] >> 4)) # high nibble
convert_frame_reference.append((data[i] & 0x0F)) # low nibble
else:
convert_frame_reference.append(data[i]) # full byte if sofm configuration is 16*16
image_width = convert_width_image_in_frames_to_byte*convert_width_frame_to_byte
image_height = convert_height_image_in_frames_to_byte*convert_height_frame_to_byte
network_units = convert_width_frame_to_byte*convert_height_frame_to_byte #64 or 256
convert_map_to_2D = [] # to store sofm map back into 2D
unit_vector_i = 0
for map_index in range(convert_width_network_to_byte*convert_height_network_to_byte):
convert_unit = []
for iterate in range(network_units):
convert_unit.append(convert_sofm_map_values[iterate+unit_vector_i])
unit_vector_i += network_units
convert_map_to_2D.append(convert_unit)
# reconstruct image frames using sofm map and codebook indices
reconstructed_image_frames = [] # which is similar to frames list made from image data
for convert_index in convert_frame_reference:
reconstructed_image_frames.append(convert_map_to_2D[convert_index]) # add frames from map using index
# to see difference between original and compressed
accuracy_sum = 0
for frame_in in range(len(reconstructed_image_frames)):
for pixel_index in range(len(reconstructed_image_frames[0])):
accuracy_sum += ((reconstructed_image_frames[frame_in][pixel_index]-frames[frame_in][pixel_index])**2)
accuracy = math.sqrt(accuracy_sum)
print(accuracy)
# to show image
hh = np.asarray(reconstructed_image_frames).reshape(image_height,image_width)
imii = Image.fromarray(hh)
imii.show()
# imii.save("savetest.png") stopped working
if __name__ == '__main__' :
main() | UTF-8 | Python | false | false | 19,529 | py | 2 | main.py | 1 | 0.594091 | 0.583645 | 0 | 356 | 53.859551 | 212 |
benkopolis/kmeanstriangleclustering | 14,851,996,944,478 | 9d512b0cc8d4bca0a029a466b02525f429d5886b | 366e2cdd40f6afef727cf24f71c70e555410fef5 | /app/configs.py | b51147275d50497d1b5a81eb54e94c76273f7961 | []
| no_license | https://github.com/benkopolis/kmeanstriangleclustering | 20a1060040074dfd28125c8b87b2d387e6bf65f5 | 8714d70f1a3e6542d44cfe16e29ce97de64622ff | refs/heads/master | 2020-12-24T13:17:10.643581 | 2017-03-22T20:31:07 | 2017-03-22T20:31:07 | 32,231,248 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from getfiles import get_fileid, get_filename
class Config:
"""
Basic class for config
"""
def __init__(self, pipe_name, inputid, outputid):
self.pipe_name = pipe_name
self.input_id = inputid
self.output_id = outputid
def to_string(self):
"""
Converts to comand line params
"""
if self.pipe_name is not None and len(self.pipe_name) > 0:
return "-pipe {}".format(self.pipe_name)
else:
return ""
class ClusteringConfig(Config):
"""
    Command line config for clustering
"""
def __init__(self, input, out, picker, distance, iterations, pipeName):
Config.__init__(self, pipeName, ["tfidf"], ["clustered", "tclustered"])
self.tfidf = input
self.out = out
self.picker = picker
self.iterations = iterations
self.distance = distance
def to_string(self):
return "-input {} -out {} -iter {} {} {} {}".format(
get_filename(self.tfidf),
get_filename(self.out),
self.iterations,
self.distance,
self.picker,
Config.to_string(self))
class TfidfConfig(Config):
"""
    Tfidf config class
"""
def __init__(
self,
stemFile,
stopOut,
stopStats,
stopIn,
tfidf,
minVariation,
minDocFreq,
pipeName):
Config.__init__(self, pipeName, ["stem", "stop"], ["tfidf", "stop"])
self.stem = stemFile
self.stop_out = stopOut
self.stop_in = stopIn
self.stop_stats = stopStats
self.tfidf = tfidf
self.variation = minVariation
self.doc_freq = minDocFreq
def to_string(self):
if self.stop_in is not None:
return "-stem {} -istop {} -tfidf {} {}".format(
get_filename(self.stem),
get_filename(self.stop_in),
get_filename(self.tfidf),
Config.to_string(self))
else:
return \
"-stem {} -stop {} -stop_stats {} -tfidf {} -min_variation {} -min_docfreq {} {}" \
.format(get_filename(self.stem),
get_filename(self.stop_out),
get_filename(self.stop_stats),
get_filename(self.tfidf),
self.variation,
self.doc_freq,
Config.to_string(self))
class RandConfig(Config):
"""
Rand config class
"""
def __init__(self, partOne, partTwo, pipe_name):
Config.__init__(self, pipe_name, ["clustered", "tclustered"], [])
self.first_partition = partOne
self.second_partition = partTwo
def to_string(self):
return "-randindex -pone {} -ptwo {} {}".format(
get_filename(self.first_partition),
get_filename(self.second_partition),
Config.to_string(self))
class VariationOfInformationConfig(Config):
"""
Variation of information config class
"""
def __init__(self, partOne, partTwo, pipe_name):
Config.__init__(self, pipe_name, ["clustered", "tclustered"], [])
self.first_partition = partOne
self.second_partition = partTwo
def to_string(self):
return "-varoi -pone {} -ptwo {} {}".format(
get_filename(self.first_partition),
get_filename(self.second_partition),
Config.to_string(self))
class SilhouetteConfig(Config):
"""
Silhouette config class
"""
def __init__(self, partition, tfidf, distance_param, pipe_name):
Config.__init__(self, pipe_name, ["tfidf", "clustered", "tclustered"], [])
self.partition = partition
self.tfidf = tfidf
self.distance = distance_param
def to_string(self):
return "-silhouette -partition {} -tfidf {} {} {}".format(
get_filename(self.partition),
get_filename(self.tfidf),
self.distance,
Config.to_string(self))
| UTF-8 | Python | false | false | 4,063 | py | 201 | configs.py | 173 | 0.537042 | 0.536795 | 0 | 128 | 30.734375 | 95 |
robo-monk/jolenejs | 4,088,808,872,311 | e1bbcf73133bcada1f7f58efb2a74dc500c9e45d | b78dca0ad948d6e2ec11435f951b71774d7d1781 | /.pnpm/scripts/build | 0d6ff8fb563070a97a1c0dc306803a150aaa7d1b | []
| no_license | https://github.com/robo-monk/jolenejs | ca4d81cba39ad5eaa8eb4df78ccb6b9a35fb4714 | 89d1525cbd45d039ac3ada31e075a6a21438fed0 | refs/heads/master | 2023-04-07T12:25:10.240284 | 2021-04-15T11:40:45 | 2021-04-15T11:40:45 | 358,022,840 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/local/bin/python3
from script import *
package_manager = pkg_manager
c = spawn(f"Bundling with {package_manager}...")
shell(f"{package_manager} run build")
c.done() | UTF-8 | Python | false | false | 176 | 7 | build | 5 | 0.704545 | 0.698864 | 0 | 10 | 16.7 | 48 |
|
Inspector007/EmployeeManagementSystem | 2,731,599,226,576 | 793f8ab08f7c8c2eba80896e42879cc0133029fc | a2898e8922d11c4ac04847373969eea12cfa0a68 | /restapi/serializers.py | 53c0ec5e773cc4188abadc348b8f51aca7497202 | []
| no_license | https://github.com/Inspector007/EmployeeManagementSystem | 39c043116782e713fe5e48157637ff08bb617b7f | 25cab900d988775af298d39c1c53fb4497b87c7d | refs/heads/master | 2020-06-11T08:02:10.174968 | 2018-11-26T05:23:27 | 2018-11-26T05:23:27 | 75,727,730 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth.models import User, Group
from rest_framework import serializers
from empapp.models import Employee
class EmployeeSerializers(serializers.ModelSerializer):
class Meta:
model = Employee
fields = ('empName', 'empEmail', 'empPan')
class EmployeeUserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
class EmployeeGroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name') | UTF-8 | Python | false | false | 582 | py | 12 | serializers.py | 7 | 0.707904 | 0.707904 | 0 | 20 | 28.15 | 70 |
vijay033/LegoMaker | 13,048,110,680,502 | 36fcecc1503eb30911e360bfdbe2a85c282e7389 | a9356a0ac1afd3f7237e3ec8ec780bfde58947a4 | /LEGOMAKER/server.py | 39f06cc4b9334399cf3359d68bd515323d55d201 | []
| no_license | https://github.com/vijay033/LegoMaker | 8f632820357be1e8c4d1d631535d9c571fe73886 | 8c53ec91f6968caa85a0d14f1c4075e7ccf06ff0 | refs/heads/master | 2022-09-12T12:48:04.161362 | 2020-06-01T01:18:02 | 2020-06-01T01:18:02 | 261,515,585 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import zerorpc
from image import Image
import os
import glob
from color_generator import Color_Generator
from filters import *
IP = 'tcp://0.0.0.0'
PORT = '4000'
class MosaicServer(object):
"""
Creates a server for generating Mosaics as a service, using zerorpc.
"""
def __init__(self):
super(MosaicServer, self).__init__()
color_generator = Color_Generator()
def load_color_palettes():
pattern = os.path.dirname(__file__) + '/resources/*.csv'
for filename in glob.iglob(pattern):
palette_name = os.path.splitext(os.path.basename(filename))[0]
color_generator.load_palette(palette_name, filename)
load_color_palettes()
self.color_generator = color_generator
def generate_mosaic(self, input_base64, num_clusters, img_length, tile_size, palette_name):
"""
Generates a mosaic from a base64 filedata string.
"""
img = Image(img_length)
img \
.load_str(input_base64) \
.apply_filter(QuantizeFilter(num_clusters)) \
.apply_filter(ConstrainPaletteFilter(self.color_generator, palette_name)) \
.apply_filter(BuildMapFilter(tile_size))
output_base64 = img.dump_str_base64('png')
print ('image generated..')
return output_base64
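# --- Illustrative client-side sketch (added example, not part of the original service) ---
def example_client(input_base64, address="tcp://127.0.0.1:" + PORT):
    """Connects to a running MosaicServer and requests one mosaic.

    The numeric arguments and the "lego" palette name passed to generate_mosaic
    below are assumptions made only for this sketch, not values mandated by the
    server.
    """
    client = zerorpc.Client()
    client.connect(address)
    return client.generate_mosaic(input_base64, 8, 512, 16, "lego")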
def main():
server = zerorpc.Server(MosaicServer())
server.bind(IP + ":" + PORT)
server.run()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 1,553 | py | 21 | server.py | 6 | 0.591114 | 0.577592 | 0 | 51 | 28.45098 | 95 |
fzk466569/python_tkinter | 3,848,290,718,181 | 7531f25e4ce9c2e2086453aa9da2993cba5cdeec | 4ad0cfa350552458df8a0270038ed436bd1d06f4 | /util/__init__.py | 5a82d334e231f4dc227935d589427c494c15d429 | []
| no_license | https://github.com/fzk466569/python_tkinter | 4b2e505f91bc4f73d632bb4fe029bd3a3b07c590 | 8c63ac171d171cd13c7891426841279f2ef53262 | refs/heads/master | 2021-01-21T11:26:38.127214 | 2017-08-31T13:15:27 | 2017-08-31T13:15:27 | 102,001,271 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2017/5/3 15:31
import hashlib
def md5(str):
m = hashlib.md5()
m.update(str)
return m.hexdigest()
if __name__ == '__main__':
print(md5('111'.encode('utf-8'))) | UTF-8 | Python | false | false | 231 | py | 53 | __init__.py | 53 | 0.545455 | 0.467532 | 0 | 13 | 16.846154 | 37 |
iskandr/parakeet | 9,517,647,549,805 | ddbe0f30b12a73f335bc7e82536554524ba8ff98 | edf31957838a65e989d5eb5e8118254ac2413fc8 | /parakeet/analysis/offset_analysis.py | b6b163fb97b451bba47f1caf75776778a1152030 | [
"BSD-3-Clause"
]
| permissive | https://github.com/iskandr/parakeet | e35814f9030b9e8508a7049b62f94eee5b8c5296 | d9089f999cc4a417d121970b2a447d5e524a3d3b | refs/heads/master | 2021-07-18T19:03:05.666898 | 2019-03-13T17:20:20 | 2019-03-13T17:20:20 | 5,889,813 | 69 | 7 | NOASSERTION | false | 2021-07-17T21:43:03 | 2012-09-20T16:54:18 | 2021-07-15T15:01:11 | 2019-03-13T17:20:21 | 8,168 | 232 | 13 | 1 | Python | false | false | from .. import prims
from .. syntax import Var, PrimCall, Const
from syntax_visitor import SyntaxVisitor
class OffsetAnalysis(SyntaxVisitor):
"""Determine static offset relationships between variables"""
def __init__(self):
# map from variable 'x' to list of variable offset pairs
# [ ('y', 4), ('z', -1), ...]
self.known_offsets = {}
def update(self, x, y, k):
if x == y:
assert k == 0, \
"Impossible %s = %s + %d" % (x,y,k)
if x in self.known_offsets:
x_offsets = self.known_offsets[x]
else:
x_offsets = set([])
x_offsets.add( (y,k) )
if y in self.known_offsets:
y_offsets = self.known_offsets[y]
else:
y_offsets = set([])
for (z, k2) in y_offsets:
x_offsets.add( (z, k2 + k) )
self.known_offsets[x] = x_offsets
self.known_offsets[y] = y_offsets
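  # Illustrative trace (added comment, not from the original code base):
  # after processing
  #   y = z + 2   ->  update('y', 'z', 2)
  #   x = y + 3   ->  update('x', 'y', 3)
  # known_offsets['x'] ends up containing both ('y', 3) and, through the
  # transitive loop above, ('z', 5).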
def visit_merge(self, merge):
for (k, (l,r)) in merge.iteritems():
if l.__class__ is Var and \
r.__class__ is Var and \
l.name in self.known_offsets and \
r.name in self.known_offsets:
left = self.known_offsets[l.name]
right = self.known_offsets[r.name]
self.known_offsets[k] = left.intersection(right)
def visit_PrimCall(self, expr):
if expr.prim is prims.add:
x, y = expr.args
if x.__class__ is Var and y.__class__ is Const:
return (x.name, int(y.value))
elif y.__class__ is Var and x.__class__ is Const:
return (y.name, int(x.value))
elif expr.prim is prims.subtract:
x, y = expr.args
if x.__class__ is Var and y.__class__ is Const:
return (x.name, -int(y.value))
return None
def visit_Assign(self, stmt):
if stmt.lhs.__class__ is Var:
if stmt.rhs.__class__ is PrimCall:
rhs = self.visit_PrimCall(stmt.rhs)
if rhs is not None:
x = stmt.lhs.name
(y, offset) = rhs
self.update(x, y, offset)
self.update(x, y, offset)
self.update(y, x, -offset)
elif stmt.rhs.__class__ is Var:
x = stmt.lhs.name
y = stmt.rhs.name
self.update(x, y, 0)
self.update(y, x, 0)
def visit_fn(self, fn):
SyntaxVisitor.visit_fn(self, fn)
return self.known_offsets
| UTF-8 | Python | false | false | 2,299 | py | 321 | offset_analysis.py | 315 | 0.555894 | 0.552849 | 0 | 77 | 28.844156 | 63 |
kaifsadri/AdventOfCode2020 | 6,665,789,289,553 | 4bd37e01ad88bd4ef3229ed13e586827bbfad934 | a7fde19cd8fd33552bebfa945bed9e6d557ccea6 | /03_1.py | 4cfa4f57cd88e652fc058c7cd0852c5c92ae5c55 | []
| no_license | https://github.com/kaifsadri/AdventOfCode2020 | cfe4667bc6f781c459190887ba107d0b9e4e3be6 | e9cab48f5841c029d3984616ac0ac2f61a14f1b4 | refs/heads/main | 2023-02-04T11:07:24.918261 | 2020-12-27T01:22:52 | 2020-12-27T01:22:52 | 318,400,525 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | L = open("input_03.txt").readlines()
right = 3
down = 1
x, y = 0, 0
l = len(L[0]) - 1
result = 0
while y < len(L):
if "#" == L[y][x % l]:
result += 1
x += right
y += down
print(f"Answer is: {result}") | UTF-8 | Python | false | false | 224 | py | 41 | 03_1.py | 41 | 0.477679 | 0.433036 | 0 | 16 | 13.0625 | 36 |
OnabanjoTitus/Python | 8,744,553,438,789 | 3b26131a34ea9298655d5133668b6bde1c688ffd | 79a19dcb0b3485416b5a216a0c762756d744c6eb | /chapter3Exercises/Palindromes.py | abc184c51c26cbfc1c2f433c11609af0d837d863 | []
| no_license | https://github.com/OnabanjoTitus/Python | a18b51395428c6f8fcd0a6009256f253efea38a0 | 6e1fbd2e73d0af0da8503ae10d8afad810127468 | refs/heads/main | 2023-04-19T03:09:31.033037 | 2021-04-25T22:02:43 | 2021-04-25T22:02:43 | 330,201,801 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import SeparatingTheDigitsInAnInteger
if SeparatingTheDigitsInAnInteger.number1 == SeparatingTheDigitsInAnInteger.number8 and \
SeparatingTheDigitsInAnInteger.number3 == SeparatingTheDigitsInAnInteger.number7:
print(SeparatingTheDigitsInAnInteger.number, "Is a palindrome")
else:
print(SeparatingTheDigitsInAnInteger.number, "Is not a Palindrome") | UTF-8 | Python | false | false | 368 | py | 35 | Palindromes.py | 35 | 0.826087 | 0.815217 | 0 | 6 | 59.666667 | 89 |
jannotti/special-a-sudoku | 16,123,307,230,174 | 493d1443f237f793c4f941e10953f7ec3ae44e58 | 8c66216bf7ca7a05c1535142e400cf6962df7288 | /check.py | 0b6d17ba069357cc338d6121b1b74263a6f03e85 | []
| no_license | https://github.com/jannotti/special-a-sudoku | 2376b53f6cf31d98a4698c4f50ae3df66995afa2 | 35026f31a8b447fd1301667afbde7661c92be30b | refs/heads/master | 2020-03-23T09:53:16.340741 | 2018-08-17T12:14:54 | 2018-08-17T12:14:54 | 141,413,199 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def digits(s):
d=[]
for ch in s:
if ch.isdigit():
d.append(int(ch))
else:
d.append(None)
return d
def board(d):
  """Split a flat list of 81 digits into a list of 9 rows of 9."""
  b = []
  for row in range(9):
    b.append(d[row*9:(row+1)*9])
  return b
def row_ok(board, r):
  """True if row r of the board contains no repeated digit."""
  seen = []
  for k in board[r]:
    if type(k) == int:
      if k in seen:
        return False
      seen.append(k)
  return True
def box_ok(board, b):
  """True if 3x3 box b (0..8, numbered row by row) contains no repeated digit."""
  seen = []
  for row in range(3):
    for col in range(3):
      k = board[(b // 3) * 3 + row][(b % 3) * 3 + col]
      if type(k) == int:
        if k in seen:
          return False
        seen.append(k)
  return True
print(row_ok([digits("12345_")], 0))
print(row_ok([[1,2,None,2],[1,2,3,4],[1,2,2,3]], 0))
| UTF-8 | Python | false | false | 967 | py | 19 | check.py | 10 | 0.475698 | 0.44364 | 0 | 44 | 20.977273 | 51 |
tjslezak/pyform | 14,568,529,082,874 | d462b8f7ba848756bbefb7eab25856d485fe2107 | d3ac994d49db9a6f44fe91d427362f7f7fd02329 | /outline_analysis/read_tps_outlines.py | 23975e2a4668c2fdc05d3fe877b08e84ef6021ea | [
"BSD-3-Clause"
]
| permissive | https://github.com/tjslezak/pyform | 8794e9e1e3ceae804a02a89eef9233fe4154c91c | 4741f29b506e13817b3f959385f8c998ba6af6a9 | refs/heads/master | 2021-09-07T00:04:55.560534 | 2018-02-13T21:31:49 | 2018-02-13T21:31:49 | 103,463,758 | 0 | 1 | BSD-3-Clause | false | 2018-02-13T21:31:50 | 2017-09-13T23:59:02 | 2017-10-10T01:40:05 | 2018-02-13T21:31:50 | 4 | 0 | 1 | 0 | Python | false | null | import pandas as pd
import numpy as np
def pyTPStoDict(tpsPath):
tpsFile = open(tpsPath, 'r')
tpsDict = {'id':[], 'image':[], 'x':[], 'y':[], 'lm':[], 'scale': []}
numLand = 0
for line in tpsFile:
if line[0] == 'L':
numLand = int(line.split('=')[1])
temp = [int(line.split('=')[1])]
tpsDict['lm'].extend([item for item in temp for i in range(numLand)])
elif line[:2] == 'IM':
imageFu = line.split('=')[1]
            imageFu = imageFu.rstrip()
imageFu = imageFu.split('.')[0]
temp = [imageFu]
tpsDict['image'].extend([item for item in temp for i in range(numLand)])
elif line[:2] == 'ID':
temp = [int(line.split('=')[1])]
tpsDict['id'].extend([item for item in temp for i in range(numLand)])
elif line[0] == 'S':
temp = [float(line.split('=')[1])]
tpsDict['scale'].extend([item for item in temp for i in range(numLand)])
else:
tempxy = line.split(' ')
tpsDict['x'].append(float(tempxy[0]))
tpsDict['y'].append(float(tempxy[1]))
tpsFile.close()
return tpsDict
def main():
tpsFilePath = "exaSpec.TPS"
nativePythonTPS = pyTPStoDict(tpsFilePath)
pandasTPS = pd.DataFrame(nativePythonTPS)
print(pandasTPS)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,380 | py | 8 | read_tps_outlines.py | 3 | 0.523188 | 0.513768 | 0 | 39 | 34.384615 | 84 |
lishaohsuai/FSK_communication | 10,290,741,676,956 | 0a4dc529d5cb7a9d2a2d27d9cca8264a1b523382 | 7b552713e527b73e66d21f3d0fb129a56a2ee55e | /source/test_audio.py | a62731c5e99a553752bc8f4fa6291860b24d778d | []
| no_license | https://github.com/lishaohsuai/FSK_communication | 4591594686063447a9d6042281755a927560c861 | c3929cc4c8cf2147413494b7f3b42e64ff810f97 | refs/heads/master | 2020-12-02T22:20:39.788698 | 2017-07-03T14:20:28 | 2017-07-03T14:20:28 | 96,117,647 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*-coding='utf-8'
import wave
from pyaudio import PyAudio,paInt16
import numpy as np                # needed by ploy_data below
import pylab as pl                # needed by ploy_data below
import matplotlib.pyplot as plt   # needed by ploy_data below
from source import decode
class audio_test:
def __init__(self):
self.framerate=48000
self.NUM_SAMPLES=2000#9600#
self.channels=1
self.sampwidth = 2
self.TIME = 8
self.chunk = 1024
self.stream = None
# self.stream_open()
def save_wave_file(self,filename,data):
        ''' save the data to the wave file '''
wf=wave.open(filename,'wb')
wf.setnchannels(self.channels)
        wf.setsampwidth(self.sampwidth) # sample width in bytes (author note: purpose not fully clear yet)
        wf.setframerate(self.framerate) # sampling rate
wf.writeframes(b"".join(data))
wf.close()
def stream_open(self):
pa = PyAudio()
self.stream=pa.open(format = paInt16,channels=1,
rate=self.framerate,input = True,
frames_per_buffer=self.NUM_SAMPLES
)
# print(type(self.stream))
def stream_clase(self):
self.stream.close()
def my_record(self):
pa = PyAudio()
self.stream=pa.open(format = paInt16,channels=1,
rate=self.framerate,input = True,
frames_per_buffer=self.NUM_SAMPLES
)
my_buf=[]
count = 0
        while count<self.TIME*20:  # controls the recording duration
string_audio_data = self.stream.read(self.chunk)
my_buf.append(string_audio_data)
count+=1
print('.',end = " ")
print('.')
print(my_buf)
self.save_wave_file('01.wav', my_buf)
return my_buf
# stream.close()
def play(self):
wf=wave.open(r"01.wav",'rb')
p=PyAudio()
stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),rate=wf.getframerate(),output=True)
number = wf.getnframes()
while True:
data = wf.readframes(1024)
number = number - self.chunk
if (number < self.chunk):
break
if data=="":break
stream.write(data)
stream.close()
        p.terminate()  # release resources
def ploy_data(y, t ,singal):
N_prntbits=singal.l_N_prntbits
Fdev=singal.l_Fdev
Fbit=singal.l_Fbit
Fs=singal.l_Fs
N_FFT = len(y) #// 10
pl.subplot(3,1,1)
# ff, tt, Sxx = si.spectrogram(y, Fs)
    # pl.pcolormesh(tt, ff, Sxx) # time-frequency spectrogram
plt.specgram(y, NFFT=512, Fs=48000, noverlap=480)
pl.xlabel('Time (s)')
pl.ylabel('Frequency (Hz)')
pl.title('Original VCO output versus time')
pl.subplot(3,1,2)
    pl.plot(t[0:Fs*N_prntbits//Fbit],y[0:Fs*N_prntbits//Fbit]) # time vs amplitude, full waveform
pl.xlabel('Time (s)')
pl.ylabel('Amplitude (V)')
pl.title('Amplitude of carrier versus time')
pl.grid(True)
freqs = np.linspace(0, Fs//2, N_FFT//2 + 1) #
xs = y[:N_FFT] #* si.hann(N_FFT, sym=0)
xf = np.fft.rfft(xs)/N_FFT
xfp = 20*np.log10(np.clip(np.abs(xf), 1e-20, 1e100))# cut top and low
pl.subplot(3,1,3)
    pl.plot(freqs,xfp[:len(freqs)])  # frequency spectrum
pl.xlabel('Frequence (s)')
pl.ylabel('Amplitude (V)')
pl.title('Amplitude & Frequence')
pl.grid(True)
pl.show()
if __name__=="__main__":
test= audio_test()
test.my_record()
print("over")
test.play()
| UTF-8 | Python | false | false | 3,490 | py | 8 | test_audio.py | 8 | 0.535798 | 0.512031 | 0 | 110 | 29.963636 | 81 |
asnani04/Efficient-Facial-Feature-Learning-with-Wide-Ensemble-based-Convolutional-Neural-Networks | 1,692,217,149,799 | e406fcd1a13235b7f09de5ba3e38411f218d708d | 3c91be419db18ead1f45f824e209418319eda249 | /model/screen/fer_demo.py | 33d9407c091ea9c44814efb811b6ed904ccebc7e | [
"MIT"
]
| permissive | https://github.com/asnani04/Efficient-Facial-Feature-Learning-with-Wide-Ensemble-based-Convolutional-Neural-Networks | 0c37845ef7b9df2334bfdd141a373d87784dfa8d | 0b77fdbc4264210ad3f7ab99deb033d5d81bb1cd | refs/heads/master | 2022-11-10T20:29:50.282212 | 2020-06-22T23:09:56 | 2020-06-22T23:09:56 | 272,285,864 | 0 | 1 | MIT | true | 2020-06-14T21:24:44 | 2020-06-14T21:24:43 | 2020-05-16T21:18:33 | 2020-06-06T04:03:26 | 116,932 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
GUI of the facial expression recognition (FER) demo
"""
__author__ = "Henrique Siqueira"
__email__ = "siqueira.hc@outlook.com"
__license__ = "MIT license"
__version__ = "1.0"
# External Libraries
import numpy as np
import cv2
# Modules
from model.utils import uimage
class FERDemo:
"""
This class implements the GUI of the facial expression recognition (FER) demo.
"""
# Default values
_DEFAULT_SCREEN_SIZE_ID = 1
_DEFAULT_WINDOW_NAME = "Siqueira_et_al_AAAI_2020"
_DEFAULT_DISPLAY_INDIVIDUAL_CLASSIFICATION = False
_DEFAULT_DISPLAY_GRAPH_ENSEMBLE = True
# Display
_SCREEN_SIZE = [(1920, 1080), (1440, 900), (1024, 768)]
_TEXT_PARAM_SCALE = [0.9, 0.8, 0.7]
_TEXT_PARAM_THICKNESS = [2, 2, 2]
_INPUT_IMAGE_SCALE_MAX = 0.9
_INPUT_IMAGE_SCALE_MIN = 0.4
# Display: blocks
_BLOCK_NUM_BLOCKS = 10 # Ensemble size
_BLOCK_INIT_POS_TEXT_NETWORK = [(0, 30), (0, 20), (0, 20)]
_BLOCK_INIT_POS_IMAGE = [(4, 170), (4, 145), (4, 125)]
_BLOCK_IMAGE_SIZE = [(100, 100), (75, 75), (60, 60)]
_BLOCK_INIT_POS_TEXT_EMOTION = [(300, 55), (240, 45), (195, 40)]
_BLOCK_INIT_POS_TEXT_AROUSAL = [(470, 40), (380, 25), (300, 25)]
_BLOCK_INIT_POS_TEXT_VALENCE = [(470, 85), (380, 65), (300, 55)]
_BLOCK_INIT_POS_BAR_AROUSAL = [(550, 15), (450, 5), (350, 7)]
_BLOCK_FINAL_POS_BAR_AROUSAL = [(920, 45), (700, 30), (500, 27)]
_BLOCK_INIT_POS_BAR_VALENCE = [(550, 60), (450, 45), (350, 42)]
_BLOCK_FINAL_POS_BAR_VALENCE = [(920, 90), (700, 70), (500, 62)]
# Ensemble only >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
_BLOCK_INIT_POS_TEXT_NETWORK_ENSEMBLE = [(10, 50), (10, 40), (10, 40)]
_BLOCK_INIT_POS_IMAGE_ENSEMBLE = [(80, 10), (60, 10), (60, 10)]
_BLOCK_IMAGE_SIZE_ENSEMBLE = [(200, 200), (150, 150), (120, 120)]
_BLOCK_INIT_POS_TEXT_EMOTION_ENSEMBLE = [(10, 350), (10, 270), (10, 220)]
_BLOCK_INIT_POS_TEXT_ACTIVATION = [(10, 420), (10, 330), (10, 260)]
_BLOCK_INIT_POS_TEXT_PLEASANT = [(10, 500), (10, 410), (10, 320)]
_BLOCK_INIT_POS_TEXT_UNPLEASANT = [(10, 580), (10, 490), (10, 380)]
_BLOCK_INIT_POS_BAR_ACTIVATION = [(10, 435), (10, 345), (10, 270)]
_BLOCK_FINAL_POS_BAR_ACTIVATION = [(600, 465), (450, 370), (300, 290)]
_BLOCK_INIT_POS_BAR_PLEASANT = [(10, 515), (10, 425), (10, 330)]
_BLOCK_FINAL_POS_BAR_PLEASANT = [(600, 545), (450, 450), (300, 350)]
_BLOCK_INIT_POS_BAR_UNPLEASANT = [(10, 595), (10, 505), (10, 390)]
_BLOCK_FINAL_POS_BAR_UNPLEASANT = [(600, 635), (450, 530), (300, 410)]
_BLOCK_INIT_POS_GRAPH = [(660, 10), (580, 10), (460, 10)]
_BLOCK_SAMPLE_GRAPH = 16
_BLOCK_THICKNESS_GRAPH = [3, 3, 3]
_BLOCK_FONT_SIZE_GRAPH = [14, 12, 10]
_BLOCK_OFFSET_GRAPH = [60, 60, 40]
_BLOCK_SIZE_GRAPH = [(8, 3.2), (7, 3), (5, 2.5)]
# Ensemble only <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Display: maximum values
_MAX_AROUSAL = 1.0
_MAX_VALENCE = 1.0
# Colours GREYSCALE
_COLOUR_G_DARK_GREY = 50
# Colours BGR
_COLOUR_BGR_GREEN = (0, 255, 0)
_COLOUR_BGR_RED = (0, 0, 255)
_COLOUR_BGR_WHITE = (255, 255, 255)
_COLOUR_BGR_BLACK = (0, 0, 0)
_COLOUR_BGR_ORANGE = (0, 125, 255)
_COLOUR_BGR_BLUE = (255, 0, 0)
_COLOUR_BGR_DARK_RED = (0, 0, 130)
_COLOUR_BGR_DARK_GREEN = (60, 130, 0)
_COLOUR_BGR_DARK_BLUE = (130, 60, 0)
_COLOUR_BGR_DARK_GREY = (50, 50, 50)
# Messages
_TEXT_BLANK_INPUT = "No frame to process."
_TEXT_NO_FACE = "No face has been detected."
_TEXT_ENSEMBLE = "Ensemble:"
_TEXT_BRANCH = "Branch {}:"
_TEXT_AROUSAL = "Aro:"
_TEXT_VALENCE = "Val:"
_TEXT_ACTIVATION = "Activation:"
_TEXT_PLEASANT = "Pleasant:"
_TEXT_UNPLEASANT = "Unpleasant:"
_TEXT_ACTIVATION_WITHOUT_TWO_DOTS = "Activation"
_TEXT_PLEASANT_UNPLEASANT = "Pleasant / Unpleasant"
def __init__(self, window_name=_DEFAULT_WINDOW_NAME, screen_size=_DEFAULT_SCREEN_SIZE_ID, display_individual_classification=_DEFAULT_DISPLAY_INDIVIDUAL_CLASSIFICATION,
display_graph_ensemble=_DEFAULT_DISPLAY_GRAPH_ENSEMBLE):
"""
Initialize GUI of the FER demo.
:param window_name: (string) The name of the window
        :param screen_size: (int) Index (1, 2 or 3) selecting one of the predefined screen resolutions.
        :param display_individual_classification: (bool) If True, show each branch's prediction as well as the ensemble.
        :param display_graph_ensemble: (bool) If True, plot the ensemble's arousal/valence over time.
"""
# Screen components
self._fer = None
self._input_image = None
self._background = None
self._plot_arousal = []
self._plot_valence = []
# Screen
self._window_name = window_name
self._screen_size = screen_size - 1
self._width, self._height = FERDemo._SCREEN_SIZE[self._screen_size]
self._display_individual_classification = display_individual_classification
self._display_graph_ensemble = display_graph_ensemble
# Container parameters
self._container_width, self._container_height = (int(self._width // 2), int(self._height))
self._container_center_position = np.array([self._container_width // 2, self._container_height // 2], dtype=np.int)
self._input_container = None
self._output_container = None
self._input_container_initial_position = np.array([0, 0], dtype=np.int)
self._output_container_initial_position = np.array([0, self._width // 2], dtype=np.int)
# Output blocks
self._output_block_height = (self._container_height // FERDemo._BLOCK_NUM_BLOCKS)
self._output_block_height_ensemble = self._container_height
self._output_block_width = self._container_width
# Screen initialization
self._draw_background()
self._screen = self._get_container(0, 0, self._height, self._width)
self._blank_screen()
cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE)
def _blank_screen(self):
"""
Create a blank screen without an input image and outputs.
"""
self._draw_input_container(True)
self._draw_output_container(True)
self._draw_screen()
def _draw_screen(self):
self._screen[:, :self._output_container_initial_position[1], :] = self._input_container
self._screen[:, self._output_container_initial_position[1]:, :] = self._output_container
def _draw_input_container(self, is_blank):
self._input_container = self._get_container(0, 0, self._container_height, self._container_width)
if is_blank:
uimage.draw_text(self._input_container,
FERDemo._TEXT_BLANK_INPUT,
self._container_center_position - 60,
FERDemo._COLOUR_BGR_WHITE,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
else:
# Compute resize factor 'f'
h, w, c = self._fer.input_image.shape
h_c, w_c, c_c = self._input_container.shape
h_ratio = h / h_c
w_ratio = w / w_c
if h_ratio > w_ratio:
if h < (self._container_height * FERDemo._INPUT_IMAGE_SCALE_MIN):
f = (self._container_height * FERDemo._INPUT_IMAGE_SCALE_MIN) / float(h)
else:
f = (self._container_height * FERDemo._INPUT_IMAGE_SCALE_MAX) / float(h)
else:
if w < (self._container_height * FERDemo._INPUT_IMAGE_SCALE_MIN):
f = (self._container_width * FERDemo._INPUT_IMAGE_SCALE_MIN) / float(w)
else:
f = (self._container_width * FERDemo._INPUT_IMAGE_SCALE_MAX) / float(w)
# Resize input image
self._input_image = uimage.resize(self._fer.input_image, f=f)
# Set input image to the container
h, w, c = self._input_image.shape
x = int((self._container_height // 2) - (h // 2))
y = int((self._container_width // 2) - (w // 2))
self._input_container[x:(x + h), y:(y + w), :] = self._input_image
def _draw_output_container(self, is_blank):
self._output_container = self._get_container(0,
self._output_container_initial_position[1],
self._container_height,
self._container_width)
if is_blank:
uimage.draw_text(self._output_container,
FERDemo._TEXT_BLANK_INPUT,
self._container_center_position - 60,
FERDemo._COLOUR_BGR_WHITE,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
else:
if self._fer.face_image is None:
uimage.draw_text(self._output_container,
FERDemo._TEXT_NO_FACE,
self._container_center_position - 210,
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
else:
# Display ensemble and individual classifications
if self._display_individual_classification:
# Resize face image
face_image = uimage.resize(self._fer.face_image, FERDemo._BLOCK_IMAGE_SIZE[self._screen_size])
# Generate block of the ensemble prediction
block = self._generate_block(FERDemo._TEXT_ENSEMBLE,
self._fer.list_emotion[-1],
self._fer.list_affect[-1][0],
self._fer.list_affect[-1][1],
face_image=face_image,
x=0,
y=self._output_container_initial_position[1])
# Draw block ot the ensemble prediction
uimage.draw_image(self._output_container, block, (0, 0))
# Branches
for branch in range(len(self._fer.list_emotion) - 1):
# Superimpose saliency map on input face image
grad_cam = self._fer.get_grad_cam(branch)
if not (grad_cam is None):
grad_cam = uimage.superimpose(grad_cam, face_image)
# Generate block of the branch prediction
block = self._generate_block(FERDemo._TEXT_BRANCH.format(branch + 1),
self._fer.list_emotion[branch],
self._fer.list_affect[branch][0],
self._fer.list_affect[branch][1],
grad_cam,
x=self._output_block_height * (branch + 1),
y=self._output_container_initial_position[1])
# Draw block of the branch prediction
uimage.draw_image(self._output_container, block, (self._output_block_height * (branch + 1), 0))
# Display ensemble classification in detail
else:
# Ensemble
face_image = uimage.resize(self._fer.face_image, FERDemo._BLOCK_IMAGE_SIZE_ENSEMBLE[self._screen_size])
block = self._generate_block_ensemble(FERDemo._TEXT_ENSEMBLE, self._fer.list_emotion[-1], self._fer.list_affect[-1][0], self._fer.list_affect[-1][1], face_image=face_image, x=0, y=self._output_container_initial_position[1])
uimage.draw_image(self._output_container, block, (0, 0))
def _generate_block(self, network_name, emotion, valence, arousal, face_image=None, x=0, y=0):
block = self._get_container(x, y, self._output_block_height, self._output_block_width)
# Image
if not (face_image is None):
uimage.draw_image(block, face_image, FERDemo._BLOCK_INIT_POS_IMAGE[self._screen_size])
# Text: Ensemble
uimage.draw_text(block,
network_name,
FERDemo._BLOCK_INIT_POS_TEXT_NETWORK[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Text: Emotion
uimage.draw_text(block,
emotion,
FERDemo._BLOCK_INIT_POS_TEXT_EMOTION[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Text: Arousal
uimage.draw_text(block,
FERDemo._TEXT_AROUSAL,
FERDemo._BLOCK_INIT_POS_TEXT_AROUSAL[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Text: Valence
uimage.draw_text(block,
FERDemo._TEXT_VALENCE,
FERDemo._BLOCK_INIT_POS_TEXT_VALENCE[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Bar: Arousal
uimage.draw_horizontal_bar(block,
arousal,
FERDemo._MAX_AROUSAL,
FERDemo._BLOCK_INIT_POS_BAR_AROUSAL[self._screen_size],
FERDemo._BLOCK_FINAL_POS_BAR_AROUSAL[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size],
FERDemo._COLOUR_BGR_DARK_BLUE)
# Bar: Valence
uimage.draw_horizontal_bar(block,
np.abs(valence),
FERDemo._MAX_VALENCE,
FERDemo._BLOCK_INIT_POS_BAR_VALENCE[self._screen_size],
FERDemo._BLOCK_FINAL_POS_BAR_VALENCE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size],
FERDemo._COLOUR_BGR_DARK_RED if valence < 0.0 else FERDemo._COLOUR_BGR_DARK_GREEN)
return block
def _generate_block_ensemble(self, network_name, emotion, valence, arousal, face_image=None, x=0, y=0):
block = self._get_container(x, y, self._output_block_height_ensemble, self._output_block_width)
# Image
if not (face_image is None):
uimage.draw_image(block, face_image, FERDemo._BLOCK_INIT_POS_IMAGE_ENSEMBLE[self._screen_size])
# Text: Ensemble
uimage.draw_text(block,
network_name,
FERDemo._BLOCK_INIT_POS_TEXT_NETWORK_ENSEMBLE[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Text: Emotion
uimage.draw_text(block,
emotion,
FERDemo._BLOCK_INIT_POS_TEXT_EMOTION_ENSEMBLE[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Text: Activation
uimage.draw_text(block,
FERDemo._TEXT_ACTIVATION + " {:.2f}".format(arousal),
FERDemo._BLOCK_INIT_POS_TEXT_ACTIVATION[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Text: Pleasant
uimage.draw_text(block,
FERDemo._TEXT_PLEASANT + (" 0.00" if valence < 0 else " {:.2f}".format(valence)),
FERDemo._BLOCK_INIT_POS_TEXT_PLEASANT[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Text: Unpleasant
uimage.draw_text(block,
FERDemo._TEXT_UNPLEASANT + (" {:.2f}".format(valence) if valence < 0 else " 0.00"),
FERDemo._BLOCK_INIT_POS_TEXT_UNPLEASANT[self._screen_size],
FERDemo._COLOUR_BGR_BLACK,
FERDemo._TEXT_PARAM_SCALE[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size])
# Bar: Activation
uimage.draw_horizontal_bar(block,
arousal,
FERDemo._MAX_AROUSAL,
FERDemo._BLOCK_INIT_POS_BAR_ACTIVATION[self._screen_size],
FERDemo._BLOCK_FINAL_POS_BAR_ACTIVATION[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size],
FERDemo._COLOUR_BGR_DARK_BLUE)
# Bar: Pleasant
uimage.draw_horizontal_bar(block,
0.0 if valence < 0.0 else valence,
FERDemo._MAX_VALENCE,
FERDemo._BLOCK_INIT_POS_BAR_PLEASANT[self._screen_size],
FERDemo._BLOCK_FINAL_POS_BAR_PLEASANT[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size],
FERDemo._COLOUR_BGR_DARK_GREEN)
# Bar: Unpleasant
uimage.draw_horizontal_bar(block,
np.abs(valence) if valence < 0.0 else 0.0,
FERDemo._MAX_VALENCE,
FERDemo._BLOCK_INIT_POS_BAR_UNPLEASANT[self._screen_size],
FERDemo._BLOCK_FINAL_POS_BAR_UNPLEASANT[self._screen_size],
FERDemo._TEXT_PARAM_THICKNESS[self._screen_size],
FERDemo._COLOUR_BGR_DARK_RED)
# Plot: Arousal and Valence
if self._display_graph_ensemble:
self._plot_arousal.append(arousal)
self._plot_valence.append(valence)
uimage.draw_graph(block, self._plot_arousal, self._plot_valence,
FERDemo._BLOCK_INIT_POS_GRAPH[self._screen_size],
FERDemo._BLOCK_SAMPLE_GRAPH,
FERDemo._TEXT_ACTIVATION_WITHOUT_TWO_DOTS,
FERDemo._TEXT_PLEASANT_UNPLEASANT,
FERDemo._COLOUR_BGR_BLUE,
FERDemo._COLOUR_BGR_ORANGE,
FERDemo._BLOCK_THICKNESS_GRAPH[self._screen_size],
FERDemo._BLOCK_OFFSET_GRAPH[self._screen_size],
FERDemo._BLOCK_FONT_SIZE_GRAPH[self._screen_size],
FERDemo._COLOUR_BGR_DARK_GREY,
FERDemo._BLOCK_SIZE_GRAPH[self._screen_size])
return block
def _draw_background(self):
if (self._fer is None) or (self._fer.input_image is None):
self._background = np.ones((self._height, self._width, 3), dtype=np.uint8) * FERDemo._COLOUR_G_DARK_GREY
else:
# Resize
self._background = uimage.resize(self._fer.input_image, f=np.maximum(
np.maximum(self._fer.input_image.shape[0] / self._height, self._fer.input_image.shape[1] / self._width),
np.maximum(self._height / self._fer.input_image.shape[0], self._width / self._fer.input_image.shape[1])
))[: self._height,:self._width,:]
# Blur
self._background = uimage.blur(uimage.blur(self._background, 40), 20)
# Brightness
mean = np.mean(self._background)
gamma = 0.75 if mean > 100 else 1.5
mean = mean if mean > 50 else 100
self._background = np.clip((gamma * self._background) + mean, 0, 255).astype(np.uint8)
def _get_container(self, x, y, h, w):
return np.array(self._background[x:x+h, y:y+w, :])
def update(self, fer):
"""
Update screen.
:param fer: (model.ml.fer.FER) An FER object.
:return: void
"""
self._fer = fer
# Background
self._draw_background()
self._draw_input_container(self._fer is None)
self._draw_output_container(self._fer is None)
self._draw_screen()
def show(self):
cv2.imshow(self._window_name, self._screen)
def save(self):
cv2.imwrite("new_image.jpg", self._screen)
def is_running(self):
return (cv2.waitKey(1) != 27) and (cv2.getWindowProperty(self._window_name, cv2.WND_PROP_VISIBLE) >= 1)
def quit(self):
cv2.destroyWindow(self._window_name)
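
# Hypothetical usage sketch (the constructor arguments and the 'fer' object are
# assumptions not shown in this file; the loop only uses the update/show/
# is_running/quit methods defined above):
#
#     demo = FERDemo(...)
#     while demo.is_running():
#         demo.update(fer)   # 'fer' produced by the recognition model
#         demo.show()
#     demo.quit()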
| UTF-8 | Python | false | false | 22,090 | py | 17 | fer_demo.py | 15 | 0.515844 | 0.488411 | 0 | 466 | 46.403433 | 243 |
goodok/sympy | 3,229,815,412,790 | b8f2cb5959aefc41ec35ab0a8f2b51d9b23757bf | 270547cf06736356d7a6cdabd22a8395edaa8b59 | /sympy/polys/tests/test_partfrac.py | 4f4093373196f1c887bfb381e3b09574f3e9679c | [
"BSD-3-Clause"
]
| permissive | https://github.com/goodok/sympy | f56fcc4a862ad3d4c5c09ad792ac576dc8f74594 | de84ed2139125a755ea7b6ba91d945d9fbbe5ed9 | refs/heads/master | 2021-01-16T18:50:41.860305 | 2012-03-20T22:40:49 | 2012-03-20T22:40:49 | 1,414,177 | 3 | 0 | null | true | 2013-01-18T08:19:10 | 2011-02-26T09:19:37 | 2012-12-25T06:38:07 | 2012-05-10T17:51:55 | 180 | null | 0 | 2 | Python | null | null | """Tests for algorithms for partial fraction decomposition of rational
functions. """
from sympy.polys.partfrac import (
apart_undetermined_coeffs,
apart_full_decomposition,
apart,
)
from sympy import S, Poly, E, pi, I, Matrix, Eq, RootSum, Lambda, factor, together
from sympy.utilities.pytest import raises
from sympy.abc import x, y, a, b, c
def test_apart():
assert apart(1) == 1
assert apart(1, x) == 1
f, g = (x**2 + 1)/(x + 1), 2/(x + 1) + x - 1
assert apart(f, full=False) == g
assert apart(f, full=True) == g
f, g = 1/(x+2)/(x+1), 1/(1 + x) - 1/(2 + x)
assert apart(f, full=False) == g
assert apart(f, full=True) == g
f, g = 1/(x+1)/(x+5), -1/(5 + x)/4 + 1/(1 + x)/4
assert apart(f, full=False) == g
assert apart(f, full=True) == g
assert apart((E*x+2)/(x-pi)*(x-1), x) == \
2 - E + E*pi + E*x + (E*pi + 2)*(pi - 1)/(x - pi)
assert apart(Eq((x**2 + 1)/(x + 1), x), x) == Eq(x - 1 + 2/(x + 1), x)
raises(NotImplementedError, "apart(1/(x + 1)/(y + 2))")
def test_apart_matrix():
M = Matrix(2, 2, lambda i, j: 1/(x + i + 1)/(x + j))
assert apart(M) == Matrix([
[1/x - 1/(x + 1), (x + 1)**(-2) ],
[1/(2*x) - (S(1)/2)/(x + 2), 1/(x + 1) - 1/(x + 2)],
])
def test_apart_symbolic():
f = a*x**4 + (2*b + 2*a*c)*x**3 + (4*b*c - a**2 + a*c**2)*x**2 + (-2*a*b + 2*b*c**2)*x - b**2
g = a**2*x**4 + (2*a*b + 2*c*a**2)*x**3 + (4*a*b*c + b**2 + a**2*c**2)*x**2 + (2*c*b**2 + 2*a*b*c**2)*x + b**2*c**2
assert apart(f/g, x) == 1/a - 1/(x + c)**2 - b**2/(a*(a*x + b)**2)
assert apart(1/((x + a)*(x + b)*(x + c)), x) == \
1/((a - c)*(b - c)*(c + x)) - 1/((a - b)*(b - c)*(b + x)) + 1/((a - b)*(a - c)*(a + x))
def test_apart_extension():
f = 2/(x**2 + 1)
g = I/(x + I) - I/(x - I)
assert apart(f, extension=I) == g
assert apart(f, gaussian=True) == g
f = x/((x - 2)*(x + I))
assert factor(together(apart(f))) == f
def test_apart_full():
f = 1/(x**2 + 1)
assert apart(f, full=False) == f
assert apart(f, full=True) == -RootSum(x**2 + 1, Lambda(a, a/(x - a)), auto=False)/2
f = 1/(x**3 + x + 1)
assert apart(f, full=False) == f
assert apart(f, full=True) == RootSum(x**3 + x + 1, Lambda(a, (6*a**2/31 - 9*a/31 + S(4)/31)/(x - a)), auto=False)
f = 1/(x**5 + 1)
assert apart(f, full=False) == \
(-S(1)/5)*((x**3 - 2*x**2 + 3*x - 4)/(x**4 - x**3 + x**2 - x + 1)) + (S(1)/5)/(x + 1)
assert apart(f, full=True) == \
-RootSum(x**4 - x**3 + x**2 - x + 1, Lambda(a, a/(x - a)), auto=False)/5 + (S(1)/5)/(x + 1)
def test_apart_undetermined_coeffs():
p = Poly(2*x - 3)
q = Poly(x**9 - x**8 - x**6 + x**5 - 2*x**2 + 3*x - 1)
r = (-x**7 - x**6 - x**5 + 4)/(x**8 - x**5 - 2*x + 1) + 1/(x - 1)
assert apart_undetermined_coeffs(p, q) == r
p = Poly(1, x, domain='ZZ[a,b]')
q = Poly((x + a)*(x + b), x, domain='ZZ[a,b]')
r = 1/((x + b)*(a - b)) + 1/((x + a)*(b - a))
assert apart_undetermined_coeffs(p, q) == r
| UTF-8 | Python | false | false | 3,066 | py | 212 | test_partfrac.py | 157 | 0.458252 | 0.403131 | 0 | 97 | 30.608247 | 119 |
arozans/idenface | 3,427,383,907,518 | 608fe16ce7dd258db1337f7683189d416db31cd1 | 604005d7d1d5473e5c92ae93bd9e606a47e796dc | /test/data/tfrecord/conftest.py | e2bd4ea97c50d9cc83298ca77ffd8e79f60a363e | []
| no_license | https://github.com/arozans/idenface | 278fe98c0360efdd73c617e7728fba27b35a2eda | fd3cdd0d1e112634a35b5f571d1acedf518ff3dc | refs/heads/master | 2020-04-30T12:10:48.018092 | 2019-07-09T20:54:24 | 2019-07-09T20:54:24 | 176,820,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pytest
from src.utils import utils, consts
from testing_utils import tf_helpers
tol = 1.e-2
@pytest.fixture()
def thor_image_path(patched_home_dir):
import urllib.request
thor_path = patched_home_dir / "downloaded/thor_is_here.png"
thor_path.parent.mkdir()
image_address = 'https://i.stack.imgur.com/Cr57x.png'
# image_address = 'https://liquipedia.net/commons/images/0/0d/ThorCE.jpg'
urllib.request.urlretrieve(image_address, thor_path)
assert utils.check_filepath(thor_path, exists=True, is_directory=False, is_empty=False)
yield str(thor_path)
thor_path.unlink()
def _check_paired_result(first_batch, expected_images_values, labels):
left_images, right_images, pair_labels, left_labels, right_labels = tf_helpers.unpack_batch(first_batch)
assert len(left_images) == len(right_images) == len(pair_labels) == len(left_labels) == len(
right_labels) == len(expected_images_values[0])
for left_image, left_expected in zip(left_images, expected_images_values[0]):
assert np.allclose(left_image + 0.5, left_expected, rtol=tol, atol=tol)
for right_image, right_expected in zip(right_images, expected_images_values[1]):
assert np.allclose(right_image + 0.5, right_expected, rtol=tol, atol=tol)
assert (pair_labels == labels[consts.PAIR_LABEL]).all()
assert (left_labels == labels[consts.LEFT_FEATURE_LABEL]).all()
assert (right_labels == labels[consts.RIGHT_FEATURE_LABEL]).all()
def _check_result(first_batch, expected_images_values, labels):
images, unpack_labels = tf_helpers.unpack_batch(first_batch)
assert len(images) == len(unpack_labels) == len(expected_images_values)
for image, expected in zip(images, expected_images_values):
assert np.allclose(image + 0.5, expected, rtol=tol, atol=tol)
assert (unpack_labels == labels[consts.LABELS]).all()
| UTF-8 | Python | false | false | 1,893 | py | 91 | conftest.py | 89 | 0.708399 | 0.700475 | 0 | 41 | 45.170732 | 108 |
kinggodhj/assignment | 2,336,462,254,927 | fad9187fd562a1d7b64414a709cb4e3cde088d36 | d54efcf18d2860adc7980bda3b7c77f74014b830 | /test_custom.py | 6b753f1570dfbc48f5114ab694cf4426c2cfdaab | []
| no_license | https://github.com/kinggodhj/assignment | 30717d149f7f74ee653659a8b27699f76ac29ba5 | e8791f32080341a4cd318325ed13ca19234f7ee0 | refs/heads/master | 2023-05-28T00:19:21.986520 | 2021-06-09T15:01:05 | 2021-06-09T15:01:05 | 373,506,264 | 0 | 0 | null | false | 2021-06-07T12:12:26 | 2021-06-03T12:52:40 | 2021-06-07T10:04:17 | 2021-06-07T12:12:25 | 311,548 | 0 | 0 | 0 | Python | false | false | import sys
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import math
from nltk.translate.bleu_score import sentence_bleu
import time
import pdb
from model_custom import Seq2SeqTransformer, create_mask, greedy_decode, greedy_decode2
from prepare import build_vocab, setup
DEVICE = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--max_len', type=int, default=100)
parser.add_argument('--emb_size', type=int, default=64)
parser.add_argument('--nhead', type=int, default=8)
parser.add_argument('--ffd_dim', type=int, default=64)
parser.add_argument('--num_encoder_layers', type=int, default=1)
parser.add_argument('--num_decoder_layers', type=int, default=1)
parser.add_argument('--path', type=str, default='./model/')
parser.add_argument('--epochs', type=int, default=50)
args = parser.parse_args()
MAX_LEN = args.max_len
EMB_SIZE = args.emb_size
NHEAD = args.nhead
FFN_HID_DIM = args.ffd_dim
NUM_ENCODER_LAYERS = args.num_encoder_layers
NUM_DECODER_LAYERS = args.num_decoder_layers
NUM_EPOCHS = args.epochs
PATH = args.path
sys.stdout = open('./generated/batch/%spremodel%s%s%s'%(args.batch_size, NUM_EPOCHS, EMB_SIZE, NUM_ENCODER_LAYERS), 'w')
def get_bleu(model, vocab, test_iter):
model.eval()
bleu = 0
losses = 0
for idx, (src, tgt) in enumerate(test_iter):
src = src.to(DEVICE)
tgt = tgt.to(DEVICE)
tgt_input = tgt[:-1, :]
src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(src, tgt_input, PAD)
tgt_out = tgt[1:,:]
ys = greedy_decode(model, src, src_mask, MAX_LEN, BOS, EOS)
logits = greedy_decode2(model, src, src_mask, tgt_out.size(0), BOS, EOS)
loss_fn = torch.nn.CrossEntropyLoss()
loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
losses += loss.item()
target = tgt.tolist()
target = sum(target, [])
target = list(map(str, target))
target = target[1:-1]
pred = ys.tolist()
pred = sum(pred, [])
pred = pred[1:-1]
generated = []
for p in pred:
generated.append(vocab.itos[p])
print(*generated, sep=" ")
def evaluate(model, test_iter):
model.eval()
losses = 0
for idx, (src, tgt) in enumerate(test_iter):
src = src.to(DEVICE)
tgt = tgt.to(DEVICE)
tgt_input = tgt[:-1, :]
src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(src, tgt_input, PAD)
logits = model(src, tgt_input, src_mask, tgt_mask, src_padding_mask, tgt_padding_mask, src_padding_mask)
tgt_out = tgt[1:,:]
loss_fn = torch.nn.CrossEntropyLoss()
loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
losses += loss.item()
return losses / len(test_iter)
if __name__ == "__main__":
train_data, voca_x, voca_y = setup("./train_x.0.txt", "./train_y.0.txt", MAX_LEN)
source_file = "./test_source.txt"
target_file = "./test_target.txt"
test_data, _, _ = setup(source_file, target_file, MAX_LEN)
train_iter = DataLoader(train_data, batch_size=1, shuffle=True, collate_fn=train_data.get_batch)
test_iter = DataLoader(test_data, batch_size=1, shuffle=True, collate_fn=test_data.get_batch)
SRC_VOCAB_SIZE = len(voca_x)
TGT_VOCAB_SIZE = len(voca_y)
EOS = voca_x['<eos>']
BOS = voca_x['<bos>']
PAD = voca_x['<pad>']
transformer = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, NHEAD, EMB_SIZE, SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, FFN_HID_DIM)
transformer = transformer.to(DEVICE)
transformer.load_state_dict(torch.load(PATH))
#train_bleu = get_bleu(transformer, train_iter)
get_bleu(transformer, voca_y, test_iter)
#loss = evaluate(transformer, test_iter)
#ppl = math.exp(loss)
#train_loss = evaluate(transformer, train_iter)
#train_ppl = math.exp(train_loss)
#print('Train PPL:', train_ppl, 'Test PPL:', ppl)
| UTF-8 | Python | false | false | 4,417 | py | 16 | test_custom.py | 6 | 0.616482 | 0.607426 | 0 | 135 | 30.718519 | 138 |
cjkpl/python-webpack-boilerplate | 1,425,929,164,511 | 1afddc3cd0160d690921cffb501fcf3a16fa2878 | f4b42f433d201aada16858e7bd99be771d2026f6 | /tests/tests_django/es6_scss/test_npm_commands.py | 044db33b57f894dae53c66340c52930d2e22bb3e | []
| no_license | https://github.com/cjkpl/python-webpack-boilerplate | 24517d0efc85e5d2b66c2b25f17b40bebde4102c | 322210e308df7ea3a9d9cef1fe2809916dc28ff8 | refs/heads/master | 2023-06-01T08:06:02.835579 | 2021-06-19T03:56:33 | 2021-06-19T03:56:33 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
def test_npm(npm_project_path, npm_build_commands):
js_path = npm_project_path / "build" / "js"
js_files = list(js_path.glob("*.*"))
js_files = [js_file.name for js_file in js_files]
assert len(js_files) > 0
js_files = ", ".join(js_files)
assert re.findall(r"app[.\w]*?.js", js_files)
assert re.findall(r"app2[.\w]*?.js", js_files)
css_path = npm_project_path / "build" / "css"
css_files = list(css_path.glob("*.*"))
css_files = [css_file.name for css_file in css_files]
assert len(css_files) > 0
css_files = ", ".join(css_files)
assert re.findall(r"app[.\w]*?.css", css_files)
vendor_path = npm_project_path / "vendors" / "images"
img_files = list(vendor_path.glob("*.*"))
img_files = [img_file.name for img_file in img_files]
assert len(img_files) > 0
img_files = ", ".join(img_files)
assert re.findall(r"sample.jpg", img_files)
assert re.findall(r"webpack.png", img_files)
| UTF-8 | Python | false | false | 971 | py | 34 | test_npm_commands.py | 15 | 0.609681 | 0.605561 | 0 | 27 | 34.962963 | 57 |
rryanburton/django-movies | 18,253,611,027,729 | 1a0296eeba97d5987e0df96f1e2ab0d034aa60b2 | 98618afe8b9d012fb95c0a145cf403a5756883aa | /movieratings/movieapp/migrations/0003_auto_20151012_0502.py | cb9f0f246799eac172d95d42f7c12eac513b7224 | []
| no_license | https://github.com/rryanburton/django-movies | 0dfa371046c0ac1be999266ca4cb7dbc2773b636 | 8ec346d71a45042a345acbd3e4cb3bd0d0105cf9 | refs/heads/master | 2021-01-10T04:25:44.271106 | 2015-10-15T04:33:13 | 2015-10-15T04:33:13 | 43,751,023 | 0 | 0 | null | false | 2015-10-15T04:33:13 | 2015-10-06T13:04:54 | 2015-10-06T21:10:58 | 2015-10-15T04:33:13 | 236 | 0 | 0 | 0 | Python | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('movieapp', '0002_auto_20151010_0205'),
]
operations = [
migrations.RemoveField(
model_name='rater',
name='id',
),
migrations.AddField(
model_name='rater',
name='user',
field=models.OneToOneField(serialize=False, default='', to=settings.AUTH_USER_MODEL, primary_key=True),
preserve_default=False,
),
]
| UTF-8 | Python | false | false | 697 | py | 15 | 0003_auto_20151012_0502.py | 7 | 0.601148 | 0.576758 | 0 | 26 | 25.807692 | 115 |
andreeaeliade/socks_project | 12,025,908,452,189 | e7e7ea473f2b632cc1027a2fdda4b292b56f6509 | c0955a5d2c52a602668f2f74872c6edb19e50cba | /deployments.py | 6f50d450cfbf26d8b904601842e7428ace454c2c | []
| no_license | https://github.com/andreeaeliade/socks_project | 61a62eaeade2feaddc40d3df8b2b14d25217ec4a | d963f2ee6de1aa9f7eb71f8b8a0c207ad5051b3e | refs/heads/main | 2023-04-19T11:13:41.704838 | 2021-05-12T12:46:48 | 2021-05-12T12:46:48 | 366,696,826 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from kubernetes import client, config
from kubernetes.client import configuration
import sys
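# Usage: "python deployments.py" lists deployments in all namespaces;
# "python deployments.py <namespace>" restricts the listing to that namespace.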
def main():
contexts, active_context = config.list_kube_config_contexts()
if not contexts:
print("Cannot find any context in kube-config file.")
return
config.load_kube_config(context=active_context["name"])
api = client.AppsV1Api()
if len(sys.argv) == 1 :
ret=api.list_deployment_for_all_namespaces()
print ("List deployments for all namespaces")
else:
ret = api.list_namespaced_deployment(namespace= sys.argv[1])
print ("List deployments for " + sys.argv[1] + " namespace" )
max_name_length = 4
for item in ret.items:
if len(item.metadata.name) > max_name_length:
max_name_length = len(item.metadata.name)
max_name_length +=1
print ("NAME".ljust(max_name_length) + "DATE IMAGES" )
for item in ret.items:
print (item.metadata.name.ljust(max_name_length) + str(item.status.conditions[0].last_update_time) + " " + item.spec.template.spec.containers[0].image )
for container in item.spec.template.spec.containers[1:]:
print ((" " * (max_name_length + 2 + len(str(item.status.conditions[0].last_update_time)))) + container.image)
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 1,252 | py | 3 | deployments.py | 1 | 0.67492 | 0.666134 | 0 | 32 | 37.1875 | 156 |
Kylep342/hello-world | 3,427,383,924,441 | 622098b962bb592dd515dbdba4856a6ef28a84f0 | 0546019a6142812753685a196b96d32866bce038 | /Think_Python/ch9_exercises/odometer_check.py | ae09acc48e65b80f2d90860d7f102727fc1769e1 | []
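# Think Python, chapter 9 exercise: find a six-digit odometer reading whose last
# four digits form a palindrome, such that one mile later the last five digits
# do, one mile after that the middle four do, and one mile after that all six do.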
| no_license | https://github.com/Kylep342/hello-world | e29e62a1c06fb4022ebf371e00b51ad136bc90ba | 867f4e1cec62b124ab4965476cf7027945e3bf9b | refs/heads/master | 2017-12-01T03:19:06.105636 | 2017-08-20T15:15:56 | 2017-08-20T15:15:56 | 60,642,144 | 0 | 0 | null | false | 2017-08-13T05:49:30 | 2016-06-07T19:55:05 | 2016-06-07T19:55:05 | 2017-08-13T05:49:30 | 1 | 0 | 0 | 0 | null | null | null | def is_palindrome(i, start, length):
s = str(i)[start:start + length]
return s[::-1] == s
def odometer_check(i):
return (is_palindrome(i, 2, 4) and
is_palindrome(i + 1, 1, 5) and
is_palindrome(i + 2, 1, 4) and
is_palindrome(i + 3, 0, 6))
def check_all():
i = 100000
while i < 999996:
if odometer_check(i):
print(i)
i += 1
check_all()
| UTF-8 | Python | false | false | 410 | py | 19 | odometer_check.py | 18 | 0.521951 | 0.460976 | 0 | 19 | 20.578947 | 38 |
daanwierstra/pybrain | 618,475,302,135 | ef172456c93547442655c77ea952107008d6a3b3 | 08058cfaeeac83bc5aab8174e8722b93b72f40b3 | /pybrain/rl/agents/egreedy.py | a9a866fdfa910933eab2e828d2d0da06d7ae3a25 | [
"BSD-3-Clause"
]
| permissive | https://github.com/daanwierstra/pybrain | 032e1d5cd3e74d380fdc55a8850d3744d2804a73 | d5d223879831b44b1512b082480e93b612927035 | refs/heads/master | 2021-01-18T19:04:41.513439 | 2009-10-13T13:17:26 | 2009-10-13T13:17:26 | 289,777 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from learning import LearningAgent
from scipy import random, array
class EpsilonGreedyAgent(LearningAgent):
def __init__(self, module, learner):
LearningAgent.__init__(self, module, learner)
self.epsilon = 0.5
self.epsilondecay = 0.9999
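        # with a decay of 0.9999 per action, epsilon halves roughly every
        # ln(2) / -ln(0.9999) ~= 6931 calls to getAction()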
def getAction(self):
""" activates the module with the last observation and stores the result as last action. """
# get greedy action
action = LearningAgent.getAction(self)
# explore by chance
if random.random() < self.epsilon:
action = array([random.randint(self.module.numActions)])
# reduce epsilon
self.epsilon *= self.epsilondecay
return action
| UTF-8 | Python | false | false | 752 | py | 150 | egreedy.py | 142 | 0.603723 | 0.594415 | 0 | 24 | 29.916667 | 100 |
narthollis/SysStat | 13,176,959,674,728 | fb04ef966f72cf2503161ea4f9b757e4e2fc7f49 | 9d8bcb7e5c8788b5136abf9ada46a3ee9685eb4d | /monitor/dispatcher.py | a57d6195fdecbfd46f2a4c746f6b84cad3b85968 | []
| no_license | https://github.com/narthollis/SysStat | 36ea0ef6953dcfaafa3f7dbb5c81a3c4d6310b15 | f9d2ae107d25b8474bdd5c256b967130fa167621 | refs/heads/master | 2021-01-01T08:56:44.045247 | 2011-09-05T15:07:11 | 2011-09-05T15:07:11 | 2,327,443 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import hashlib
from configparser import NoOptionError
from socketserver import StreamRequestHandler
from datetime import datetime
class Dispatcher(StreamRequestHandler):
def log(self, msg):
print("%s %s" % (datetime.now().isoformat(), msg))
def boundry(self, item):
data = "%s-%s-%s" % (datetime.now(), self.client_address[0], item.decode())
return hashlib.sha1(data.encode()).hexdigest()
def handle(self):
self.log("New Connection from %s" % (self.client_address[0],))
self.active = True
        # Receive the greeting and process it, fail if it's wrong
self.data = self.rfile.readline().strip()
if not self.data.startswith(b'HELLO'): return
# Find out who the client claims to be
(junk, identifier) = self.data.split(b' ')
self.log("%s identified as %s" % (
self.client_address[0],
identifier.decode())
)
        # Receive the access code, fail if it's wrong or if the client didn't
        # identify as a valid client
self.data = self.rfile.readline().strip()
try:
if not self.server.config.get('authorization', identifier.decode()) == \
self.data.decode():
return
except NoOptionError:
return
self.wfile.write(b'OK\n')
self.wfile.flush()
while self.active:
self.data = self.rfile.readline().strip()
if self.data == b'CLOSE': return
elif self.data.startswith(b'GET'):
(junk, item) = self.data.split(b' ')
key = item.decode().lower()
if key in self.server.modules.keys():
self.server.modules[key].reset()
self.server.modules[key].run()
boundry = self.boundry(item)
response = "SENDING %s ----%s----\n" % (key, boundry)
response+= "%s" % (self.server.modules[key],)
response+= "\n----%s---- FINISHED\n" % (boundry, )
self.wfile.write(response.encode())
self.wfile.flush()
else:
self.wfile.write(b'ERROR 100 --- UNKNOWN MODULE\n')
self.wfile.flush()
elif self.data.startswith(b'LIST'):
try:
(junk, item) = self.data.split(b' ')
                except ValueError:
                    self.wfile.write(b'ERROR 200 --- UNKNOWN LIST\n')
                    self.wfile.flush()
                    continue
if item == b'ALL':
boundry = self.boundry(b'list all')
response = "LIST ALL ----%s----\n" % (boundry, )
response+= "\n".join(self.server.modules.keys())
response+= "\n----%s---- FINISHED\n" % (boundry, )
self.wfile.write(response.encode())
self.wfile.flush()
elif item.decode().lower() in self.server.modules.keys():
boundry = self.boundry(('list %s' % item).encode())
key = item.decode().lower()
try:
response = "LIST %s ----%s----\n" % (key, boundry)
response+= "\n".join(self.server.modules[key].getlist())
response+= "\n----%s---- FINISHED\n" % (boundry, )
self.wfile.write(response.encode())
except AttributeError as e:
print(e)
self.wfile.write(b'ERROR 210 --- MODULE DOSE NOT SUPPORT LIST\n')
finally:
self.wfile.flush()
else:
self.wfile.write(b'ERROR 200 --- UNKNOWN LIST\n')
self.wfile.flush()
else:
self.wfile.write(b'ERROR 000 -- UNKNOWN COMMAND\n')
self.wfile.flush()
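
# Minimal example client for the protocol implemented by Dispatcher.handle().
# Host, port, identifier and access code below are placeholder assumptions; the
# message sequence (HELLO <id> -> access code -> OK -> LIST/GET -> CLOSE)
# mirrors the server logic above.
def example_client(host='localhost', port=9999, identifier='myhost', access_code='secret'):
    import socket
    with socket.create_connection((host, port)) as sock:
        wfile = sock.makefile('wb')
        rfile = sock.makefile('rb')
        wfile.write('HELLO {0}\n'.format(identifier).encode())
        wfile.write('{0}\n'.format(access_code).encode())
        wfile.flush()
        if rfile.readline().strip() != b'OK':
            return None
        wfile.write(b'LIST ALL\n')
        wfile.flush()
        header = rfile.readline().decode()  # e.g. "LIST ALL ----<boundary>----"
        wfile.write(b'CLOSE\n')
        wfile.flush()
        return header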
| UTF-8 | Python | false | false | 3,459 | py | 16 | dispatcher.py | 13 | 0.555363 | 0.54987 | 0 | 103 | 32.582524 | 79 |
ajaygalagali/codes_python_for_everybody_coursera | 13,804,024,893,082 | ea93328dd83846ca5dc786e95a9556a2e3ce2f9d | d51383bf37f5af1593b9fd8c3a72284ed03067c1 | /Access Web Data/urllibLearn.py | fd3a8aeee1db7e6ec7c4b099c8349cfca3f798fe | []
| no_license | https://github.com/ajaygalagali/codes_python_for_everybody_coursera | dd47bf172418b8eb896cc849c901b345b5fb1a2c | 6549f98c5174b3004f50024261ba9f68a28e6787 | refs/heads/master | 2022-11-07T15:27:31.739403 | 2020-06-27T02:53:00 | 2020-06-27T02:53:00 | 273,063,806 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib.error,urllib.request,urllib.parse
import re
fhand = urllib.request.urlopen('https://www.quora.com')
numList = list()
for line in fhand:
tempList = re.findall('https://(w.+")+',line.decode())
    if len(tempList) < 1:
continue
for i in tempList:
numList.append(i)
for i in numList:
print(i)
# print(urllib.parse.urlencode({'adress':'this is test, yes'}))
| UTF-8 | Python | false | false | 407 | py | 24 | urllibLearn.py | 22 | 0.648649 | 0.646192 | 0 | 15 | 26.066667 | 63 |
hacktoon/dale | 10,393,820,895,991 | 67452336c8d9246c352ab2447b6177a67d2d4f58 | 4ba448fe3437076ea4a6f2e932344c5521a54a52 | /dale/exceptions/__init__.py | 8a4b410edca3c7f03c6f5a66ee465ad83cd79633 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | https://github.com/hacktoon/dale | 2f2284b5532b5c64cd904bc2571bb5365b714e6e | 5b8e82ba82a6238a4ca8aed554330d7b430ab2f3 | refs/heads/master | 2018-12-11T11:55:53.232910 | 2018-12-02T22:12:44 | 2018-12-02T22:12:44 | 111,252,562 | 7 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class BaseError(Exception):
pass
class LexingError(BaseError):
def __init__(self, index=None):
super().__init__("Invalid syntax")
self.index = index
class ParsingError(BaseError):
def __init__(self):
super().__init__()
class UnexpectedTokenError(LexingError):
def __init__(self, token, expected_tokens=None):
message = "Unexpected {!r} token.\n".format(token.id)
if expected_tokens:
message += "Expected token(s): {!r}".format(str(expected_tokens))
super().__init__(message)
self.index = token.index[0]
class UnexpectedTokenValueError(LexingError):
def __init__(self, token, expected_tokens=None, expected_values=None):
tpl = "Found a {!r} token with value {!r}.\n"
message = tpl.format(token.id, token.value)
if expected_tokens:
message += "Expected {!r} token(s)".format(str(expected_tokens))
if expected_values:
message += ", with value(s): {!r}".format(str(expected_values))
super().__init__(message)
self.index = token.index[0]
class UnexpectedEOFError(LexingError):
pass
class ExpectedValueError(ParsingError):
pass
| UTF-8 | Python | false | false | 1,202 | py | 26 | __init__.py | 15 | 0.616473 | 0.614809 | 0 | 42 | 27.619048 | 77 |
Jackil-R/datacamp-python-data-engineer-track | 7,997,229,122,468 | 37c426ee5cf55ce75f80532361ceefdc8a96f325 | 0cdca7d5e466597acb809418802aafe4036c175e | /10.Object-Oriented Programming in Python/Ch3-Integrating with Standard Python.py | adf2fa1590b8caa777ab2e50b1d987765daa1158 | []
| no_license | https://github.com/Jackil-R/datacamp-python-data-engineer-track | 313566e463d9d5d714b654eac3e39ddb857d2170 | 8f58fe443dab91555b66264fa8e4557bec29fdbe | refs/heads/master | 2023-02-19T18:47:41.439883 | 2021-01-19T23:00:00 | 2021-01-19T23:00:00 | 325,276,762 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Chapter 3 Integrating with Standard Python
# Operator overloading: comparison
print("=========================================================")
# Overloading equality
print("=========================================================")
# Checking class equality
print("=========================================================")
# Comparison and inheritance
print("=========================================================")
# Operator overloading: string representation
print("=========================================================")
# String formatting review
print("=========================================================")
# String representation of objects
print("=========================================================")
# Exceptions
print("=========================================================")
# Catching exceptions
print("=========================================================")
# Custom exceptions
print("=========================================================")
# Handling exception hierarchies
print("=========================================================") | UTF-8 | Python | false | false | 1,123 | py | 113 | Ch3-Integrating with Standard Python.py | 27 | 0.308994 | 0.308103 | 0 | 56 | 19.071429 | 66 |
jennyslu/trivia_app | 4,449,586,126,143 | f6134b1577c53be1b789edae9fff64650f05fc1a | 8f0d19080adff48b2a8cb2685a680e681fd592ed | /backend/flaskr/models.py | f5ca4baeac109c041824611d82a74332c65cfce8 | []
| no_license | https://github.com/jennyslu/trivia_app | b73566e12f2624adc957562a6cfa2c088666792b | ef74c0d03645fedc5b36f0abcce08a84b9093255 | refs/heads/master | 2021-07-04T06:57:46.156879 | 2020-08-21T05:10:08 | 2020-08-21T05:10:08 | 235,240,781 | 0 | 0 | null | false | 2021-05-06T19:52:25 | 2020-01-21T02:34:35 | 2020-08-21T05:10:27 | 2021-05-06T19:52:25 | 202 | 0 | 0 | 2 | JavaScript | false | false | from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String
db = SQLAlchemy()
def setup_db(app):
"""
Bind flask application and SQLAlchemy service together.
"""
db.app = app
db.init_app(app)
db.create_all()
class Question(db.Model):
__tablename__ = 'questions'
id = Column(Integer, primary_key=True)
question = Column(String)
answer = Column(String)
difficulty = Column(Integer)
# actually category ID
category = Column(db.Integer,
db.ForeignKey('categories.id'),
nullable=False)
question_category = db.relationship('Category',
backref='question_category')
def __init__(self, question, answer, category, difficulty):
self.question = question
self.answer = answer
self.category = category
self.difficulty = difficulty
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id': self.id, 'question': self.question, 'answer': self.answer,
'difficulty': self.difficulty, 'category': self.category,
'category_name': self.question_category.name
}
class Category(db.Model):
__tablename__ = 'categories'
id = Column(Integer, primary_key=True)
name = Column(String)
questions = db.relationship('Question', backref='category_questions')
def __init__(self, name):
self.name = name
def format(self):
return {'id': self.id, 'name': self.name}
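
# Hypothetical usage, assuming setup_db(app) has been called with a properly
# configured Flask app (the field values below are placeholders):
#
#     question = Question(question='...', answer='...', category=1, difficulty=2)
#     question.insert()
#     payload = question.format()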
| UTF-8 | Python | false | false | 1,729 | py | 19 | models.py | 14 | 0.598612 | 0.598612 | 0 | 66 | 25.19697 | 76 |
gulshalla/algorithms | 14,302,241,126,102 | f3e0c4593d31a1881886a8d6eca86291e653548e | 025f7d55e2f1c31249a639c14b19ab605292ed30 | /algorithms/math/happy_numbers.py | d572aac5e0a4f7529da6fd2d4ef16987cf46b31f | [
"MIT"
]
| permissive | https://github.com/gulshalla/algorithms | 98ed9724552419c599f37ed8cc25467274e0be98 | 6ef4866fa4b5a9e5a440ddbc3a620307bd522e11 | refs/heads/master | 2020-04-27T03:09:31.135352 | 2019-12-17T07:11:49 | 2019-12-17T07:11:49 | 174,016,906 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Write an algorithm to determine if a number is "happy".
A happy number is a number defined by the following process: Starting with
any positive integer, replace the number by the sum of the squares of its digits, and repeat
the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle
which does not include 1. Those numbers for which this process ends in 1 are happy numbers.
Example:
Input: 19
Output: true
Explanation:
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1
'''
def happy_numbers(n):
mem = set()
while True:
n = sum([int(x)**2 for x in str(n)])
if n in mem:
return False
elif n == 1:
return True
mem.add(n)
def happy_numbers_v2(n):
orig = set()
while 1:
sum = 0
while n > 0:
sum += (n % 10) ** 2
            n //= 10
if sum == 1:
return True
else:
if sum in orig:
return False
orig.add(sum)
n = sum
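
# Quick self-check: 19 is the happy number worked through in the docstring,
# while 2 falls into the 4 -> 16 -> 37 -> 58 -> 89 -> 145 -> 42 -> 20 -> 4 cycle.
if __name__ == '__main__':
    assert happy_numbers(19)
    assert happy_numbers_v2(19)
    assert not happy_numbers(2)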
| UTF-8 | Python | false | false | 1,071 | py | 471 | happy_numbers.py | 470 | 0.545285 | 0.505135 | 0 | 44 | 22.772727 | 93 |
AccentDesign/django-sagepaypi | 4,483,945,873,743 | cf55c3d308e58d2c1c38129b40f9f6178b90e611 | 85169df1604bc381d694946e821e11c97bbd8773 | /tests/test_gateway.py | 2bce927e9356c6522898a4a6d3b0f6cfe677745c | [
"MIT"
]
| permissive | https://github.com/AccentDesign/django-sagepaypi | dbf62c569d37201a2c76e4b253cb9e6b6a7dc62b | afb419b25f9819f4a7d09a12a3bb3ac4cd4745c6 | refs/heads/master | 2020-04-21T19:42:35.571754 | 2019-12-11T16:54:49 | 2019-12-11T16:54:49 | 169,816,952 | 0 | 2 | MIT | false | 2019-03-06T15:03:22 | 2019-02-09T00:32:13 | 2019-02-11T12:59:04 | 2019-03-06T15:03:22 | 75 | 0 | 0 | 0 | CSS | false | null | import dateutil
import mock
from django.test import override_settings
from sagepaypi.gateway import default_gateway, SagepayGateway
from tests.mocks import MockResponse
from tests.test_case import AppTestCase
def mocked_gone_response(*args, **kwargs):
return MockResponse({}, 500)
def mocked_success_requests(*args, **kwargs):
if args[0] == 'https://pi-test.sagepay.com/api/v1/merchant-session-keys':
return MockResponse({
'merchantSessionKey': 'unique-key',
'expiry': '2015-08-11T11:45:16.285+01:00'
}, 201)
else:
return MockResponse({}, 201)
@override_settings(SAGEPAYPI_VENDOR_NAME='vendor')
@override_settings(SAGEPAYPI_INTEGRATION_KEY='user')
@override_settings(SAGEPAYPI_INTEGRATION_PASSWORD='pass')
@override_settings(SAGEPAYPI_TEST_MODE=True)
class TestGateway(AppTestCase):
def test_default_gateway(self):
self.assertTrue(isinstance(default_gateway, SagepayGateway))
def test_basic_auth(self):
auth = default_gateway.basic_auth()
self.assertEqual(auth.username, 'user')
self.assertEqual(auth.password, 'pass')
def test_vendor_name(self):
vendor_name = default_gateway.vendor_name()
self.assertEqual(vendor_name, 'vendor')
def test_api_url__when_dev(self):
url = default_gateway.api_url()
self.assertEqual(url, 'https://pi-test.sagepay.com/api/v1')
@override_settings(SAGEPAYPI_TEST_MODE=False)
def test_api_url__when_live(self):
url = default_gateway.api_url()
self.assertEqual(url, 'https://pi-live.sagepay.com/api/v1')
@mock.patch('sagepaypi.gateway.requests.post', side_effect=mocked_success_requests)
def test_get_merchant_session_key(self, mock_post):
default_gateway.get_merchant_session_key()
self.assertIn(
mock.call(
'https://pi-test.sagepay.com/api/v1/merchant-session-keys',
auth=default_gateway.basic_auth(),
json={'vendorName': 'vendor'}
),
mock_post.call_args_list
)
@mock.patch('sagepaypi.gateway.requests.post', side_effect=mocked_success_requests)
def test_get_merchant_session_key(self, mock_post):
merchant_session_key = default_gateway.get_merchant_session_key()
self.assertEqual(merchant_session_key[0], 'unique-key')
self.assertEqual(merchant_session_key[1], dateutil.parser.parse('2015-08-11T11:45:16.285+01:00'))
@mock.patch('sagepaypi.gateway.requests.post', side_effect=mocked_gone_response)
def test_get_merchant_session_key__returns_none_when_http_error(self, mock_post):
merchant_session_key = default_gateway.get_merchant_session_key()
self.assertIsNone(merchant_session_key)
@mock.patch('sagepaypi.gateway.requests.post', side_effect=mocked_success_requests)
def test_create_card_identifier(self, mock_post):
default_gateway.create_card_identifier({'foo': 1})
self.assertIn(
mock.call(
'https://pi-test.sagepay.com/api/v1/merchant-session-keys',
auth=default_gateway.basic_auth(),
json={'vendorName': 'vendor'}
),
mock_post.call_args_list
)
self.assertIn(
mock.call(
'https://pi-test.sagepay.com/api/v1/card-identifiers',
headers={'Authorization': 'Bearer unique-key'},
json={'foo': 1}
),
mock_post.call_args_list
)
@mock.patch('sagepaypi.gateway.requests.post', side_effect=mocked_success_requests)
def test_get_3d_secure_status(self, mock_post):
default_gateway.get_3d_secure_status('123', {'foo': 1})
self.assertIn(
mock.call(
'https://pi-test.sagepay.com/api/v1/transactions/123/3d-secure',
auth=default_gateway.basic_auth(),
json={'foo': 1}
),
mock_post.call_args_list
)
@mock.patch('sagepaypi.gateway.requests.get', side_effect=mocked_success_requests)
def test_get_transaction_outcome(self, mock_get):
default_gateway.get_transaction_outcome('123')
self.assertIn(
mock.call(
'https://pi-test.sagepay.com/api/v1/transactions/123',
auth=default_gateway.basic_auth()
),
mock_get.call_args_list
)
@mock.patch('sagepaypi.gateway.requests.post', side_effect=mocked_success_requests)
def test_submit_transaction(self, mock_post):
default_gateway.submit_transaction({'foo': 1})
self.assertIn(
mock.call(
'https://pi-test.sagepay.com/api/v1/transactions',
auth=default_gateway.basic_auth(),
json={'foo': 1}
),
mock_post.call_args_list
)
@mock.patch('sagepaypi.gateway.requests.post', side_effect=mocked_success_requests)
def test_submit_transaction_instruction(self, mock_post):
default_gateway.submit_transaction_instruction('123', {'foo': 1})
self.assertIn(
mock.call(
'https://pi-test.sagepay.com/api/v1/transactions/123/instructions',
auth=default_gateway.basic_auth(),
json={'foo': 1}
),
mock_post.call_args_list
)
| UTF-8 | Python | false | false | 5,390 | py | 65 | test_gateway.py | 44 | 0.619666 | 0.602412 | 0 | 151 | 34.695364 | 105 |
AChris07/slackwolf-gm | 10,127,532,933,496 | cd5220b66fcff8d8a95e4d2c66e72b4061b29e91 | a2ef94e2f1836f4e598d267118d64cb9c5ead755 | /slackwolf/api/dao/user_dao.py | f0359fec0d204de71abf4b7c6199a3caf92b8215 | []
| no_license | https://github.com/AChris07/slackwolf-gm | adf608267177c09518527cde18116297db03aaed | fdcd9d169fa8037953972e2c4baa50cffd90627f | refs/heads/master | 2022-12-11T16:32:20.897198 | 2020-07-16T03:22:01 | 2020-07-16T03:22:01 | 81,918,278 | 0 | 0 | null | false | 2022-12-08T11:03:42 | 2017-02-14T07:33:29 | 2020-07-16T04:07:00 | 2022-12-08T11:03:41 | 13,276 | 0 | 0 | 4 | Python | false | false | from typing import List
from .team_dao import TeamDao
import slackwolf.db as db
from slackwolf.db.entities.team import Team
from slackwolf.db.entities.user import User
class UserDao:
"""User DAO Interface object"""
def __init__(self):
self.__session = db.session
def find(self, id: int) -> User:
"""Find by Id"""
return self.__session.query(User).get(id)
def find_by_sid(self, team_sid: str, slack_id: str) -> User:
"""Find by team and user Slack Id"""
return self.__session.query(User).\
join(Team).\
filter(Team.slack_id == team_sid).\
filter(User.slack_id == slack_id).\
first()
def find_all(self) -> List[User]:
"""Find all users"""
return self.__session.query(User).all()
def save(self, user: User) -> None:
"""Save the given user"""
self.__session.add(user)
self.__session.commit()
def update(self, id: int, **kwargs) -> None:
"""Given a user Id, update the user"""
self.__session.query(User).\
filter(User.id == id).\
update(kwargs)
self.__session.commit()
def get_or_create_by_sid(self,
team_sid: str,
slack_id: str,
username: str) -> User:
"""Find or create a user by team and user Slack Id"""
user = self.find_by_sid(team_sid, slack_id)
if not user:
user = User(slack_id=slack_id, username=username)
team = TeamDao().find_by_sid(team_sid)
user.team = team
self.save(user)
return user
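
# Hypothetical usage, assuming slackwolf.db exposes a configured session (the
# Slack identifiers below are placeholders):
#
#     dao = UserDao()
#     user = dao.get_or_create_by_sid('T123', 'U456', 'some-username')
#     dao.update(user.id, username='renamed-user')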
| UTF-8 | Python | false | false | 1,679 | py | 32 | user_dao.py | 31 | 0.536629 | 0.536629 | 0 | 54 | 30.092593 | 64 |
aslemen/morepo | 13,408,887,919,752 | df3b06b2f01cd17bc3413a9a4dd6a7b159e0a30c | 9aa095ce1ceed0b7a78e6879d912e73f384db59d | /morepo/inputters/morep_json.py | d3d9b0ac9ce0356854d9256b0f2e86bc39708a1a | [
"MIT"
]
| permissive | https://github.com/aslemen/morepo | 235d2cfeeb25b83b5ae3f6c07ea907112c92f94c | 1c7e8d114da471ea250604b7a53e92d192c8349a | refs/heads/master | 2020-03-08T03:58:54.916251 | 2018-08-06T10:48:45 | 2018-08-06T10:48:45 | 127,907,877 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import glob
import typing
import numbers
import packaging.version as vers
import morepo.objects as objs
import morepo.dbio as dbio
import morepo.filters.WTL as wtl
import BTrees.OOBTree as oob
import persistent.list as pl
import persistent.dict as pd
def extract_from_stream(stream) -> typing.List[dict]:
"""
Extract morep.json data from a stream.
Arguments
---------
stream:
A stream
"""
jsdata: dict = json.load(stream)
# check the version
# ---
if vers.Version(jsdata["version"]) < vers.Version("1.1"):
raise Exception()
res: typing.List[dict] = jsdata["content"]
return res
# END
def extract_from_files(root_dir: str, extension: str = "morep.json"):
files_path = glob.iglob(root_dir + ".".join((r"/**/*", extension)), recursive = True)
res_list = []
for fp in files_path:
with open(fp) as stream:
res_list.extend(extract_from_stream(stream))
return res_list
JSON_TYPE_to_MOREP = {
"book": objs.Morep_Bib_Book,
"proceedings": objs.Morep_Bib_Proceedings,
"journal": objs.Morep_Bib_Journal,
"inbook": objs.Morep_Bib_Inbook,
"inproceedings": objs.Morep_Bib_Inproceedings,
"paper": objs.Morep_Bib_Paper,
"presentation": objs.Morep_Bib_Presentation,
"thesis": objs.Morep_Bib_Thesis
}
def convert_list_to_PersistentList_recursively(item: typing.Any) -> pl.PersistentList:
if isinstance(item, list):
return pl.PersistentList(map(convert_list_to_PersistentList_recursively, item))
else:
return item
def convert_dict_to_OOBTree_recursively(item: typing.Any) -> oob.BTree:
    if isinstance(item, dict):
        res = oob.BTree()
        for k, v in item.items():
            res[k] = convert_dict_to_OOBTree_recursively(v)
        return res
    else:
        return item
def convert_subitems_to_Persistent_recursively(item: typing.Any):
res = None
if isinstance(item, numbers.Number) or isinstance(item, str) or isinstance(item, bool):
res = item
elif isinstance(item, list):
res = pl.PersistentList()
for subitem in item:
res.append(convert_subitems_to_Persistent_recursively(subitem))
elif isinstance(item, dict):
res = pd.PersistentDict()
for subitem in item.items():
key, val = subitem
res[convert_subitems_to_Persistent_recursively(key)] = \
convert_subitems_to_Persistent_recursively(val)
else:
res = str(item)
return res
def convert_to_Morep_Base(record_raw: typing.Dict) -> objs.Morep_Bib_Base:
entity = JSON_TYPE_to_MOREP[record_raw["work_type"]]()
for k, v in record_raw.items():
setattr(entity, k, convert_subitems_to_Persistent_recursively(v))
return entity
def convert_records_to_Morep_Base(records_raw: typing.Iterator[dict]) -> typing.Iterator[objs.Morep_Bib_Base]:
return map(convert_to_Morep_Base, records_raw)
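
# Hypothetical usage (the directory path below is a placeholder):
#
#     records = extract_from_files("/path/to/bibliography")
#     entities = list(convert_records_to_Morep_Base(records))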
| UTF-8 | Python | false | false | 3,015 | py | 19 | morep_json.py | 17 | 0.641128 | 0.640464 | 0 | 106 | 27.433962 | 110 |
MichaelPinatton/301Redirect-Screaming-Frog | 12,429,635,383,512 | 06f908642017f55b578426577a230bb72f9cdc64 | ca07a979c321925146c0eb392bc3977fcbaf7f33 | /app.py | ec0234796d12e102b561cdb190b71f389bf68d31 | []
| no_license | https://github.com/MichaelPinatton/301Redirect-Screaming-Frog | 70269ca51dae0aa045a820444c93372d796f02ff | a0186581607cfed40ac84a3a89de0df879c1427a | refs/heads/master | 2023-05-09T05:00:35.169818 | 2021-04-19T16:24:55 | 2021-04-19T16:24:55 | 359,112,211 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import streamlit as st
import os
import base64
from io import BytesIO
import xlsxwriter
st.set_page_config(
page_title="Correspondance des Redirections 301 - Screaming Frog", page_icon="🐶",
)
#Import CSV
st.title('Correspondance des Redirections 301 - Screaming Frog')
st.markdown('')
st.markdown('')
st.write("Correspondance automatique des 301 grâce au crawler Screaming Frog.")
st.write("Fichiers nécessaires : Export CSV des 301 + Export CSV des inlinks vers 301")
st.write("➤ [Explications et démo en vidéo](https://www.loom.com/share/f65ef1c236e1426dbb1d547765724617)")
st.write("By [@MichaelPinatton](https://twitter.com/michaelpinatton)")
st.markdown('## ** ① Chargez le fichier CSV des 301 **')
st.markdown('')
upload1 = st.file_uploader("Choisissez votre fichier (CSV): ", key="1")
if upload1 is not None:
df_301 = pd.read_csv(upload1)
st.write('Aperçu :')
st.write(df_301.head())
else:
pass
st.markdown('## ** ② Chargez le fichier CSV des inlinks 301 **')
st.markdown('')
upload2 = st.file_uploader("Choisissez votre fichier (CSV): ", key="2")
if upload2 is not None:
df_inlinks = pd.read_csv(upload2)
st.write('Aperçu :')
st.write(df_inlinks.head())
else:
pass
#Reorganize DF
if upload1 is not None and upload2 is not None:
df_301 = df_301[['Address', 'Redirect URL', 'Status Code']]
df_301 = df_301.rename({'Address': 'URL Redirigée','Redirect URL': 'URL Finale'}, axis=1)
df_inlinks = df_inlinks[['Source', 'Destination', 'Anchor', 'Link Position']]
df_inlinks = df_inlinks.rename({'Source': 'URL Source','Destination': 'URL Redirigée'}, axis=1)
#Regroup data in 1DF
df = pd.merge(df_inlinks, df_301, how='right', on=['URL Redirigée'])
df = df[["URL Source", "URL Redirigée", "URL Finale", "Status Code", "Anchor", "Link Position"]]
df = df.rename({"URL Finale": "URL Finale (à remplacer dans l'URL source)"}, axis=1)
#Export in XLSX
st.markdown('## **③ Téléchargez le fichier XLSX**')
st.markdown('')
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, sheet_name="Pages en 301 avec l'URL Source")
workbook = writer.book
center = workbook.add_format({'align': 'center'})
worksheet = writer.sheets["Pages en 301 avec l'URL Source"]
worksheet.set_column('A:A', 10)
worksheet.set_column('B:D', 70)
worksheet.set_column('E:E', 15, center)
worksheet.set_column('F:G', 20)
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
val = to_excel(df)
b64 = base64.b64encode(val)
return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="Redirections_301_Full.xlsx">➤ Cliquez pour télécharger</a>' # decode b'abc' => abc
st.markdown(get_table_download_link(df), unsafe_allow_html=True)
else:
pass | UTF-8 | Python | false | false | 3,174 | py | 2 | app.py | 1 | 0.657343 | 0.62206 | 0 | 90 | 33.966667 | 171 |
drankincms/boostedhiggs | 12,506,944,801,630 | 2db5c7ce584016ec81335ca1b4c54b1504973c5f | f2f26e263da6d3cfe66e78fc326744ac512ff7f9 | /test/make_trig_eff.py | 09df428f726353c7dcc86335b8d4299995de8bc6 | []
| no_license | https://github.com/drankincms/boostedhiggs | 64b231a18abd49eee82630501df1ae815d58569f | 31d15d7dc165c3eb081793d79a3770973b7abc21 | refs/heads/dev | 2023-06-25T18:04:01.088815 | 2021-10-26T15:36:13 | 2021-10-26T15:36:13 | 227,409,855 | 0 | 4 | null | true | 2022-04-15T17:41:58 | 2019-12-11T16:22:26 | 2021-10-26T15:36:27 | 2022-04-15T17:41:58 | 24,813 | 0 | 3 | 0 | Python | false | false | from __future__ import print_function, division
import gzip
import json
import os
import uproot
import matplotlib.pyplot as plt
import numpy as np
from coffea import hist
from coffea.util import load, save
import pickle
import gzip
import math
import argparse
#import processmap
#from hists_map import *
plt.rcParams.update({
'font.size': 14,
'axes.titlesize': 18,
'axes.labelsize': 18,
'xtick.labelsize': 12,
'ytick.labelsize': 12,
'text.usetex': False,
})
fill_opts = {
'edgecolor': (0,0,0,0.3),
'alpha': 0.8
}
err_opts = {
#'linestyle':'-',
'marker': '.',
'markersize': 10.,
'color':'k',
'elinewidth': 1,
'emarker': '-'
}
chanlist = ["hadhad", "hadel", "hadmu"]
histnames = {
'hadhad': "trigeff_m",
'hadel': "trigeff_m",
'hadmu': "trigeff_m",
}
varcuts_data = {
"hadhad": {"region": "hadhad_signal_0", "trig_pass_ref": [0.5, 1.5]},
"hadel": {"region": "hadel_signal_0", "trig_pass_ref": [0.5, 1.5], "jet_msd": [40.,None]},
"hadmu": {"region": "hadmu_signal_0", "trig_pass_ref": [0.5, 1.5], "jet_msd": [40.,None]},
}
varcuts_mc = {
"hadhad": {"region": "hadhad_signal_0", "trig_pass_ref": [0.5, 1.5]},
"hadel": {"region": "hadel_signal_0", "trig_pass_ref": [0.5, 1.5], "jet_msd": [40.,None]},
"hadmu": {"region": "hadmu_signal_0", "trig_pass_ref": [0.5, 1.5], "jet_msd": [40.,None]},
}
var1names = {
"hadhad": "jet_pt",
"hadel": "jet_pt",
"hadmu": "jet_pt",
}
var1labels = {
"hadhad": "$p_{T}(jet)$",
"hadel": "$p_{T}(jet)$",
"hadmu": "$p_{T}(jet)$",
}
rebin1 = {
"hadhad": [200.,250.,300.,350.,400.,450.,500.,550.,600.,650.,950.],
"hadel": [200.,250.,300.,350.,400.,500.,950.],
"hadmu": [200.,250.,300.,350.,400.,500.,950.],
}
var2names = {
"hadhad": "jet_msd",
"hadel": "lep_pt",
"hadmu": "lep_pt",
}
var2labels = {
"hadhad": "$m_{SD}(jet)$",
"hadel": "$p_{T}(e)$",
"hadmu": "$p_{T}(\mu)$",
}
rebin2 = {
"hadhad": [0.,10.,20.,30.,40.,50.,60.,70.,80.,90.,100.],
"hadel": [20.,32.,44.,56.,68.,92.,116.,140.],
"hadmu": [20.,32.,44.,56.,68.,92.,116.,140.],
}
numsels = {
"hadhad": {"trig_pass": [0.5, 1.5]},
"hadel": {"trig_pass": [0.5, 1.5]},
"hadmu": {"trig_pass": [0.5, 1.5]},
}
#overflow_behavior = 'all'
overflow_behavior = 'over'
def getTrigEff(h,var1_name,var2_name,vars_cut,num_sel,rebins1,rebins2):
#def drawTrigEff(h,var1_name,var1_label,var2_name,var2_label,vars_cut,num_sel,plot_title,plot_label):
print(h)
#print(h.values())
exceptions = [var1_name,var2_name,'dataset']
for var,val in vars_cut.items():
exceptions.append(var)
for var,val in num_sel.items():
exceptions.append(var)
print(exceptions)
x = h.sum(*[ax for ax in h.axes() if ax.name not in exceptions],overflow='all')
if var1_name in num_sel.keys() or var2_name in num_sel.keys():
print("%s and %s cannot be a variable in numerator selection"%(var1_name,var2_name))
return
for var,val in vars_cut.items():
if var!=var1_name and var!=var2_name:
            print('integrating ', var, val)
if (len(val)==2):
x = x.integrate(var,slice(val[0],val[1]))#,overflow=overflow_behavior)
elif(type(val) is str):
x = x.integrate(var,val)#,overflow=overflow_behavior)
elif(len(val)==1):
x = x.integrate(var,val[0])#,overflow=overflow_behavior)
x_num = x.copy()
#x_num = x_num.sum(*[ax for ax in x_num.axes() if ax.name in num_sel],overflow='all') #same axes as numerator
#x_num.clear()
xlist = []
for var,val in num_sel.items():
if var!=var1_name and var!=var2_name:
print('integrating ',var,val)
print(var,val)
if (len(val)==2):
#xlist.append(x.integrate(var,slice(val[0],val[1])))#,overflow=overflow_behavior))
x_num = x_num.integrate(var,slice(val[0],val[1]))#,overflow=overflow_behavior))
elif(len(val)==1):
#xlist.append(x.integrate(var,val[0]))#,overflow=overflow_behavior))
x_num = x_num.integrate(var,val[0])#,overflow=overflow_behavior))
#for xadd in xlist:
# x_num.add(xadd)
x = x.sum(*[ax for ax in x.axes() if ax.name in num_sel],overflow='none')
#print(x.values())
#print(x_num.values())
x = x.sum(*["dataset"],overflow='allnan')
x_num = x_num.sum(*["dataset"],overflow='allnan')
#x = x.rebin(var1_name, hist.Bin(var1_name+"_new", var1_name+"_new", rebins1))
#x = x.rebin(var2_name, hist.Bin(var2_name+"_new", var2_name+"_new", rebins2))
#x_num = x_num.rebin(var1_name, hist.Bin(var1_name+"_new", var1_name+"_new", rebins1))
#x_num = x_num.rebin(var2_name, hist.Bin(var2_name+"_new", var2_name+"_new", rebins2))
x = x.rebin(var1_name, hist.Bin(var1_name, var1_name, rebins1))
x = x.rebin(var2_name, hist.Bin(var2_name, var2_name, rebins2))
x_num = x_num.rebin(var1_name, hist.Bin(var1_name, var1_name, rebins1))
x_num = x_num.rebin(var2_name, hist.Bin(var2_name, var2_name, rebins2))
x_bins = x.axis(var1_name).edges()
y_bins = x.axis(var2_name).edges()
den_arr = np.array(x.values(overflow='all')[()])
num_arr = np.array(x_num.values(overflow='all')[()])
if ([ax.name for ax in x.axes()][0]==var1_name):
den_arr = np.transpose(den_arr)
num_arr = np.transpose(num_arr)
den_arr[:][1] = den_arr[:][1] + den_arr[:][0]
den_arr[:][-2] = den_arr[:][-2] + den_arr[:][-1]
num_arr[:][1] = num_arr[:][1] + num_arr[:][0]
num_arr[:][-2] = num_arr[:][-2] + num_arr[:][-1]
den_arr[1][:] = den_arr[1][:] + den_arr[0][:]
den_arr[-2][:] = den_arr[-2][:] + den_arr[-1][:]
num_arr[1][:] = num_arr[1][:] + num_arr[0][:]
num_arr[-2][:] = num_arr[-2][:] + num_arr[-1][:]
den_arr = np.delete(den_arr,-1,0)
den_arr = np.delete(den_arr,0,0)
den_arr = np.delete(den_arr,-1,1)
den_arr = np.delete(den_arr,0,1)
num_arr = np.delete(num_arr,-1,0)
num_arr = np.delete(num_arr,0,0)
num_arr = np.delete(num_arr,-1,1)
num_arr = np.delete(num_arr,0,1)
print(num_arr)
print(den_arr)
print(x_bins)
print(y_bins)
eff_range_arr = hist.clopper_pearson_interval(num_arr, den_arr)
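    # return: per-bin efficiency (num/den), the bin edges of both variables, and
    # the Clopper-Pearson [lower, upper] interval of that efficiency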
return np.transpose(np.divide(num_arr,den_arr, out=np.zeros_like(num_arr), where=den_arr!=0)),x_bins,y_bins,np.transpose(eff_range_arr,[0,2,1])
def getHists(filename_data,filename_mc,hadel_w,hadmu_w,hadhad_w):
eff_hists_data = {}
eff_hists_mc = {}
eff_hists_data_int = {}
eff_hists_mc_int = {}
x_bins = {}
y_bins = {}
chan_w = {'hadhad':hadhad_w,'hadel':hadel_w,'hadmu':hadmu_w}
for chan in chanlist:
h_trig = None
for f_d in filename_data:
# open hists
hists_unmapped_data = load('%s.coffea'%f_d)
# map to hists
for key in hists_unmapped_data:
if (key==histnames[chan]):
if not h_trig:
h_trig = hists_unmapped_data[key]
else:
h_trig = h_trig + hists_unmapped_data[key]
print(f_d)
eff_hists_data[chan],_,_,eff_hists_data_int[chan] = getTrigEff(h_trig,var1names[chan],var2names[chan],varcuts_data[chan],numsels[chan],rebin1[chan],rebin2[chan])
#drawTrigEff(h_trig,args.var1name,args.var1label,args.var2name,args.var2label,vars_cuts,num_sels,args.title,args.label)
h_trig = None
if (len(chan_w[chan]) != len(filename_mc)):
chan_w[chan] = [1. for f in filename_mc]
for i,f_m in enumerate(filename_mc):
# open hists
hists_unmapped_mc = load('%s.coffea'%f_m)
# map to hists
for key in hists_unmapped_mc:
if (key==histnames[chan]):
if (chan_w[chan][i] != 1.):
hists_unmapped_mc[key].scale(chan_w[chan][i])
if not h_trig:
h_trig = hists_unmapped_mc[key]
else:
h_trig = h_trig + hists_unmapped_mc[key]
print(f_m)
eff_hists_mc[chan],x_bins[chan],y_bins[chan],eff_hists_mc_int[chan] = getTrigEff(h_trig,var1names[chan],var2names[chan],varcuts_mc[chan],numsels[chan],rebin1[chan],rebin2[chan])
#drawTrigEff(h_trig,args.var1name,args.var1label,args.var2name,args.var2label,vars_cuts,num_sels,args.title,args.label)
return eff_hists_data,eff_hists_mc,x_bins,y_bins,eff_hists_data_int,eff_hists_mc_int
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--hists_data', dest='hists_data', default="hists_data", help="hists pickle name (data)", nargs='+')
parser.add_argument('--hists_mc', dest='hists_mc', default="hists_mc", help="hists pickle name (MC)", nargs='+')
parser.add_argument('--hadel_w', dest='hadel_w', default=[1.], help="HadEl File Weights (MC)", nargs='+', type=float)
parser.add_argument('--hadmu_w', dest='hadmu_w', default=[1.], help="HadMu File Weights (MC)", nargs='+', type=float)
parser.add_argument('--hadhad_w', dest='hadhad_w', default=[1.], help="HadHad File Weights (MC)", nargs='+', type=float)
#parser.add_argument('--histname', dest='histname', default="trigeff", help="hist name")
parser.add_argument('--tag', dest='tag', default="trig_sf_debug", help="tag")
parser.add_argument('--output', dest='output', default="../boostedhiggs/data/trig_sf_corr", help="output")
parser.add_argument('--year', dest='year', default="2017", help="year")
#parser.add_argument('--varname', dest='varname', default="", help="varname")
#parser.add_argument('--varlabel', dest='varlabel', default="", help="varlabel")
#parser.add_argument('--varcuts', dest='varcuts', default="", help="varcuts", nargs='+')
#parser.add_argument('--numsel', dest='numsel', default="", help="numsel", nargs='+')
#parser.add_argument('--title', dest='title', default="", help="title")
#parser.add_argument('--label', dest='label', default="", help="label")
args = parser.parse_args()
#python make_trig_eff.py --hists_data ../condor/May27_Trig/hists_trig_Run2017CDEF --hists_mc ../condor/May27_Trig/hists_trig_QCD
eff_hists_data,eff_hists_mc,x_bins,y_bins,eff_hists_data_int,eff_hists_mc_int = getHists(args.hists_data,args.hists_mc,args.hadel_w,args.hadmu_w,args.hadhad_w)
h_trig_sf = {}
arr_sf = {}
arr_sf_up = {}
arr_sf_down = {}
h_trig_eff_mc = {}
h_trig_eff_data = {}
for chan in chanlist:
h_trig_sf[args.year+"_trigsf_"+chan+"_nom"] = hist.Hist("Trigger Scale Factor (%s) Nominal"%chan,
hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),
hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),
)
h_trig_sf[args.year+"_trigsf_"+chan+"_up"] = hist.Hist("Trigger Scale Factor (%s) Up"%chan,
hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),
hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),
)
h_trig_sf[args.year+"_trigsf_"+chan+"_down"] = hist.Hist("Trigger Scale Factor (%s) Down"%chan,
hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),
hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),
)
h_trig_eff_mc[chan] = hist.Hist("Trigger Efficiency, MC (%s)"%chan,
hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),
hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),
)
h_trig_eff_data[chan] = hist.Hist("Trigger Efficiency, Data (%s)"%chan,
hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),
hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),
)
inputs = {}
inputs_up = {}
inputs_down = {}
inputs_mc = {}
inputs_data = {}
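        # Fill inputs with the (var1, var2) bin centers of every 2D cell so the
        # scale-factor and efficiency histograms below can be filled cell by cell.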
inputs[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs_up[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs_up[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs_down[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs_down[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs_mc[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs_mc[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs_data[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
inputs_data[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])
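        # Scale factor per cell: SF = eff_data / eff_MC, with np.divide guarding
        # against empty MC cells (those default to 1). The up/down variations pair
        # the data/MC efficiency interval bounds to give a conservative band.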
inputs["weight"] = np.divide(eff_hists_data[chan],eff_hists_mc[chan],out=np.ones_like(eff_hists_data[chan]), where=eff_hists_mc[chan]!=0.).flatten()
inputs_up["weight"] = np.divide(eff_hists_data_int[chan][1],eff_hists_mc_int[chan][0],out=np.ones_like(eff_hists_data_int[chan][1]), where=eff_hists_mc_int[chan][0]!=0.).flatten()
inputs_down["weight"] = np.divide(eff_hists_data_int[chan][0],eff_hists_mc_int[chan][1],out=np.ones_like(eff_hists_data_int[chan][0]), where=eff_hists_mc_int[chan][1]!=0.).flatten()
arr_sf[chan] = inputs["weight"]
arr_sf_up[chan] = inputs_up["weight"]
arr_sf_down[chan] = inputs_down["weight"]
inputs_mc["weight"] = eff_hists_mc[chan].flatten()
inputs_data["weight"] = eff_hists_data[chan].flatten()
h_trig_sf[args.year+"_trigsf_"+chan+"_nom"].fill(**inputs)
h_trig_sf[args.year+"_trigsf_"+chan+"_up"].fill(**inputs_up)
h_trig_sf[args.year+"_trigsf_"+chan+"_down"].fill(**inputs_down)
h_trig_eff_mc[chan].fill(**inputs_mc)
h_trig_eff_data[chan].fill(**inputs_data)
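    # The loops below write annotated 2D debug plots: nominal SF, up/down shifts
    # relative to nominal, and the underlying data/MC efficiencies per channel.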
for chan in chanlist:
fig,ax = plt.subplots(1,1, figsize=(8,8))
hist.plot2d(h_trig_sf[args.year+"_trigsf_"+chan+"_nom"],
ax=ax,
clear=True,
xaxis=var1names[chan],
)
for i in range(len(y_bins[chan])-1):
for j in range(len(x_bins[chan])-1):
ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., "{:0.2f}".format(np.reshape(arr_sf[chan],(len(x_bins[chan])-1,len(y_bins[chan])-1))[j,i]) if eff_hists_mc[chan][j,i]>0. else "",
color="k", ha="center", va="center")#, fontweight="bold")
fig.savefig("%s/trig_sf_debug_%s.pdf"%(args.tag,chan))
for chan in chanlist:
fig,ax = plt.subplots(1,1, figsize=(8,8))
hist.plot2d(h_trig_sf[args.year+"_trigsf_"+chan+"_up"],
ax=ax,
clear=True,
xaxis=var1names[chan],
)
for i in range(len(y_bins[chan])-1):
for j in range(len(x_bins[chan])-1):
ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., "{:0.2f}".format(np.reshape(arr_sf_up[chan]-arr_sf[chan],(len(x_bins[chan])-1,len(y_bins[chan])-1))[j,i]) if eff_hists_mc_int[chan][0][j,i]>0. else "",
color="k", ha="center", va="center")#, fontweight="bold")
fig.savefig("%s/trig_sf_debug_up_%s.pdf"%(args.tag,chan))
for chan in chanlist:
fig,ax = plt.subplots(1,1, figsize=(8,8))
hist.plot2d(h_trig_sf[args.year+"_trigsf_"+chan+"_down"],
ax=ax,
clear=True,
xaxis=var1names[chan],
)
for i in range(len(y_bins[chan])-1):
for j in range(len(x_bins[chan])-1):
ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., "{:0.2f}".format(np.reshape(arr_sf_down[chan]-arr_sf[chan],(len(x_bins[chan])-1,len(y_bins[chan])-1))[j,i]) if eff_hists_mc_int[chan][1][j,i]>0. else "",
color="k", ha="center", va="center")#, fontweight="bold")
fig.savefig("%s/trig_sf_debug_down_%s.pdf"%(args.tag,chan))
for chan in chanlist:
fig,ax = plt.subplots(1,1, figsize=(8,8))
hist.plot2d(h_trig_eff_data[chan],
ax=ax,
clear=True,
xaxis=var1names[chan],
)
for i in range(len(y_bins[chan])-1):
for j in range(len(x_bins[chan])-1):
ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., "{:0.2f}".format(eff_hists_data[chan][j,i]) if eff_hists_data[chan][j,i]>0. else "",
color="k", ha="center", va="center")#, fontweight="bold")
fig.savefig("%s/trig_eff_data_debug_%s.pdf"%(args.tag,chan))
for chan in chanlist:
fig,ax = plt.subplots(1,1, figsize=(8,8))
hist.plot2d(h_trig_eff_mc[chan],
ax=ax,
clear=True,
xaxis=var1names[chan],
)
for i in range(len(y_bins[chan])-1):
for j in range(len(x_bins[chan])-1):
ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., "{:0.2f}".format(eff_hists_mc[chan][j,i]) if eff_hists_mc[chan][j,i]>0. else "",
color="k", ha="center", va="center")#, fontweight="bold")
fig.savefig("%s/trig_eff_mc_debug_%s.pdf"%(args.tag,chan))
print(h_trig_sf)
save(h_trig_sf,"%s_%s.coffea"%(args.output,args.year))
| UTF-8 | Python | false | false | 18,869 | py | 48 | make_trig_eff.py | 35 | 0.551699 | 0.524193 | 0 | 395 | 46.76962 | 256 |
davidb-github/daily-journal-server | 16,758,962,399,061 | 4ee5345bbd9ec4457448b9255961a3ed20ed5158 | bccfb63ca0aff929497df545c96276a02f00c8c0 | /entries/request.py | a617992ee83d7df019794302b2d00912a6f2b775 | []
| no_license | https://github.com/davidb-github/daily-journal-server | b945b6b1098bfeb650b7c70b9fd5914fe27c70ed | 1d0007128c6fb51e962958e3869bd493f733e327 | refs/heads/main | 2023-02-18T09:16:26.219635 | 2021-01-20T15:24:25 | 2021-01-20T15:24:25 | 331,330,411 | 0 | 0 | null | false | 2021-01-20T15:24:26 | 2021-01-20T14:25:36 | 2021-01-20T14:25:54 | 2021-01-20T15:24:26 | 0 | 0 | 0 | 0 | Python | false | false | import sqlite3
import json
from models import Entry
def get_all_entries():
#open a connection to db
with sqlite3.connect("./dailyjournal.db") as conn:
# setup row and cursor
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
# select query
db_cursor.execute("""
SELECT
e.id,
e.date,
e.concept,
e.entry,
e.mood_id
FROM entries AS e
""")
        # init an empty list to hold entry representations
entries = []
# convert rows of data into python list
dataset = db_cursor.fetchall()
# iterate list returned from database
for row in dataset:
entry = Entry(row['id'], row['date'], row['concept'],
row['entry'], row['mood_id'])
entries.append(entry.__dict__)
# Use `json` package to properly serialize list as JSON
return json.dumps(entries) | UTF-8 | Python | false | false | 982 | py | 5 | request.py | 5 | 0.547862 | 0.544807 | 0 | 39 | 24.205128 | 65 |
SanjoyPator1/ImageProcessingSkillsTrackDataCamp | 1,211,180,829,294 | 2f3bc01f7492b8beac490c54f041d938eaeee22a | 3deb8547395318885455a1094bf32291ee84c5fd | /C03_Image_Processing_With_Keras/04EvaluatingClassifier.py | a1afe2e78050897ca272388b0a33f5051fa6af6c | []
| no_license | https://github.com/SanjoyPator1/ImageProcessingSkillsTrackDataCamp | 1978520dae29e89533d840af81e2252711f51bc0 | a6b7eb1b9ad193336ea02b81619de0b437940744 | refs/heads/master | 2022-12-28T12:44:30.096203 | 2020-10-04T16:15:02 | 2020-10-04T16:15:02 | 298,842,849 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from numpy import array  # needed so the arrays below are defined when run on its own

test_labels = array([[0., 0., 1.],
[0., 1., 0.],
[0., 0., 1.],
[0., 1., 0.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 1., 0.]])
predictions = array([[0., 0., 1.],
[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.],
[0., 0., 1.],
[1., 0., 0.],
[0., 0., 1.],
[0., 1., 0.]])
# Calculate the number of correct predictions
number_correct = (test_labels*predictions).sum()
print(number_correct)
# Calculate the proportion of correct predictions
proportion_correct = number_correct / (len(predictions))
print(proportion_correct) | UTF-8 | Python | false | false | 623 | py | 41 | 04EvaluatingClassifier.py | 38 | 0.449438 | 0.372392 | 0 | 28 | 21.214286 | 56 |
FychanTW/automation_PO | 1,889,785,611,360 | aa8595df23a014f247ddc720381b69a1ff3964be | 370913383bb160539819c02bb2bd918548867ac4 | /src/AboutWindow_GUI.py | a630dcf74a2a6537b39af464415d1f7a69727645 | []
| no_license | https://github.com/FychanTW/automation_PO | c6f6beb13641cb736a47851e834eaee2b7fc9f4f | 314081aa3354f67d7117e6be3648d0cef72b2e69 | refs/heads/main | 2023-08-23T13:36:39.685594 | 2021-10-31T09:46:48 | 2021-10-31T09:46:48 | 418,862,024 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'AboutWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AboutWindow(object):
def setupUi(self, AboutWindow):
AboutWindow.setObjectName("AboutWindow")
AboutWindow.resize(364, 475)
font = QtGui.QFont()
font.setFamily("Arial")
AboutWindow.setFont(font)
self.centralwidget = QtWidgets.QWidget(AboutWindow)
self.centralwidget.setObjectName("centralwidget")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setGeometry(QtCore.QRect(30, 40, 311, 121))
font = QtGui.QFont()
font.setFamily("Arial")
self.groupBox.setFont(font)
self.groupBox.setObjectName("groupBox")
self.label = QtWidgets.QLabel(self.groupBox)
self.label.setGeometry(QtCore.QRect(20, 20, 271, 91))
self.label.setWordWrap(True)
self.label.setObjectName("label")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setGeometry(QtCore.QRect(30, 280, 311, 91))
font = QtGui.QFont()
font.setFamily("Arial")
self.groupBox_2.setFont(font)
self.groupBox_2.setObjectName("groupBox_2")
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setGeometry(QtCore.QRect(30, 180, 311, 81))
font = QtGui.QFont()
font.setFamily("Arial")
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName("groupBox_3")
self.label_2 = QtWidgets.QLabel(self.groupBox_3)
self.label_2.setGeometry(QtCore.QRect(30, 20, 291, 51))
self.label_2.setWordWrap(True)
self.label_2.setObjectName("label_2")
AboutWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(AboutWindow)
self.statusbar.setObjectName("statusbar")
AboutWindow.setStatusBar(self.statusbar)
self.retranslateUi(AboutWindow)
QtCore.QMetaObject.connectSlotsByName(AboutWindow)
def retranslateUi(self, AboutWindow):
_translate = QtCore.QCoreApplication.translate
AboutWindow.setWindowTitle(_translate("AboutWindow", "About"))
self.groupBox.setTitle(_translate("AboutWindow", "About this program"))
self.label.setText(_translate("AboutWindow", "This program provides a quick access to an internal used Trello system for procurement process initiated in D Lab, Institute of Science, Nagoya University."))
self.groupBox_2.setTitle(_translate("AboutWindow", "License"))
self.groupBox_3.setTitle(_translate("AboutWindow", "Authors"))
self.label_2.setText(_translate("AboutWindow", "<html><head/><body><p>Feng-Yueh Chan, GitHub: FychanTW</p><p>Yui Kanaoka, GitHub: yuikanaoka</p></body></html>"))
| UTF-8 | Python | false | false | 3,038 | py | 13 | AboutWindow_GUI.py | 9 | 0.686307 | 0.659645 | 0 | 64 | 46.46875 | 212 |
dflemin3/diskpy | 18,107,582,123,213 | 9e48ae227baaf9aa1d0e6a12b1bd79d13469fc76 | 919d9f3f071238fe5b64aa217f54a896bcd8ac85 | /diskpy/clumps/simclumps/_simclumps.py | 633bb7126bf09a9d68978a40761f6bf392e44bd0 | [
"MIT"
]
| permissive | https://github.com/dflemin3/diskpy | 5a54d0216db93adeae79ab60cb88a091af7b5073 | 9a89562f6dda0b904439872136a04b90ea5cbc4e | refs/heads/master | 2021-01-18T08:47:37.442028 | 2015-11-04T21:14:04 | 2015-11-04T21:14:04 | 40,382,686 | 1 | 0 | null | true | 2015-11-04T21:04:34 | 2015-08-07T22:03:12 | 2015-08-07T22:03:13 | 2015-11-04T21:04:34 | 4,716 | 0 | 0 | 0 | Python | null | null | # -*- coding: utf-8 -*-
"""
Defines all the data classes for storing, managing, organizing, and accessing
clumps.
Created on Thu May 7 21:38:26 2015
@author: ibackus
"""
import pynbody as pb
SimArray = pb.array.SimArray
SimSnap = pb.snapshot.SimSnap
import numpy as np
from warnings import warn
def newSimgroup(simlist):
"""
Generates a new simgroup object from a list of clumplists.
"""
nSims = len(simlist)
simgrp = simgroup(nSims)
for i, s in enumerate(simlist):
simgrp[i] = newSim(s)
return simgrp
def newSim(clumplist):
"""
Generates a sim object from a list of clump dictionaries for an entire
simulation.
"""
if len(clumplist) < 1:
return sim(0)
nClumps = len(clumplist)
s = sim(nClumps)
for i, cl in enumerate(clumplist):
s[i] = newClump(cl)
return s
def newClump(clumpDict):
"""
Generates a new clump object from a clump dictionary (see clumps.blank_clump
and clumps.build_clumps).
"""
# Number of timesteps in this simulation
nt = len(clumpDict['pos'])
    # ---Compatibility---
# Deal with an unfortunate name convention. pynbody uses 'vel' by default
# to refer to 3D velocity, whereas clumpDicts may use 'v'
if 'v' in clumpDict:
clumpDict['vel'] = clumpDict.pop('v')
if 'm' in clumpDict:
clumpDict['mass'] = clumpDict.pop('m')
# NaN values for N are unnatural--make them zeros
if 'N' in clumpDict:
clumpDict['N'][np.isnan(clumpDict['N'])] = 0
# Initialize a blank clump
cl = clump(nt)
# Load all the arrays that are created by default in clump.__init__
unloaded_keys = set(clumpDict.keys())
for k in cl.keys():
if k in clumpDict.keys():
cl[k] = clumpDict[k]
unloaded_keys.remove(k)
# Try to load any new arrays which are present in clumpDict
for k in clumpDict.keys():
if k in unloaded_keys:
unloaded_keys.remove(k)
v = clumpDict[k]
if np.ndim(v) > 1:
ndim = v.shape[1]
else:
ndim = 1
cl._create_array(k, ndim, v.dtype)
cl[k] = v
if len(unloaded_keys) > 0:
warn('Could not load {0} into clump'.format(unloaded_keys))
return cl
def clump(nt):
"""
Generates a clump object, which is basically just a pynbody SimSnap where
time steps are represented by particles.
**ARGUMENTS**
nt : int
Number of time steps in the simulation
"""
cl = pb.new(s=nt)
cl._create_arrays(['r_clump', 'time', 'm', 'T', 'rho'], ndim=1,\
dtype=float)
cl._create_arrays(['L'], ndim=3, dtype=float)
cl._create_array('N', dtype=int)
return cl
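# Rough usage sketch (illustrative only; assumes clump dictionaries with at
# least a 'pos' entry, as produced by clumps.clump_tracker):
#
#   >>> cl = newClump({'pos': pos_array, 'N': N_array, 'mass': m_array})
#   >>> s = newSim([clumpDict0, clumpDict1])      # one simulation, two clumps
#   >>> grp = newSimgroup([clumplistA, clumplistB])
#   >>> grp('mass', t=-1)                         # masses at the final time step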
class sim(np.ndarray):
"""
The sim class. sims are basically just numpy arrays containing clump
objects. It stores data for all the clumps in a given simulation (for all
available time steps). Additionally, slicing can be done in different ways
(see below)
To initialize a blank sim object:
>>> s = sim(nClumps)
To generate a sim object from a list of clumpDicts for a simulation (ie
the output of :func:`clumps.clump_tracker`)
>>> s = newSim(clumplist)
Where nClumps is the number of clumps in the simulation
Accessing different quantites:
>>> s = newSim(clumplist) # create sim object from clumplist
>>> x = s('x', t=slice(None)) # get x for all clumps at all times
# Get the mass for the first time step at which the clumps exist
>>> m0 = s('mass',t=0,clumpexist=True)
# Get velocity for the final timestep at which a clump exists
>>> vf = s('vel', t=-1, clumpexist=True)
# Get masses of all clumps at end of simulation
>>> all_masses = s('mass', t=-1)
>>> mask = ~np.isnan(all_masses)
>>> mf = all_masses[mask]
Print available keys:
>>> s.keys()
Select a sub-group of clumps by normal array slicing:
>>> s1 = s[0:10]
**CALL FUNCTION ARGUMENTS**
s(key, t=None, clumpexist=False, clump=None)
key : str
Clump key to access
t : slicer
Anything that can be used to slice a numpy array, eg [1,2,4] or
slice(0,-1). Slices according to the simulation time
clumpexist : bool
Filter out all quantities by whether the clump exists at that time.
e.g., this makes it easy to select the first timestep when a clump
exists
clump : slicer
Anything that can be used to slice a numpy array, eg [1,2,4] or
slice(0,-1). Slices by clump number
"""
def __new__(subtype, shape, buffer=None, offset=0, \
strides=None, order=None):
dtype = object
obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, \
strides, order)
return obj
def __array_finalize__(self, obj):
if obj is None:
newSim
return
def __call__(self, key, t=None, clumpexist=False, clump=None):
"""
"""
# ---------------------------------------
# Initialize
# ---------------------------------------
s = _simslice(self, clump)
test_array = s[0][0][key]
units = _getunits(test_array)
dim = _getdim(test_array)
nt = _getnt(s, t)
if nt <= 0:
# Nothing matches t
return None
nClumps = len(s)
dummy = s[0][0][key]
dtype = dummy.dtype
outarray = self._init_array(nClumps, nt, dim, dtype, units)
usemask = np.ones(nClumps, dtype=bool)
# ---------------------------------------
# Access the data
# ---------------------------------------
for iClump, cl in enumerate(s):
if clumpexist:
# only look at the times where the clump exists
cl = cl[cl['exists']]
if t is not None:
try:
cl = cl[t]
except IndexError:
cl = []
nt_use = len(cl)
if nt_use > 0:
if nt > 1:
outarray[iClump, 0:nt_use] = cl[key]
else:
outarray[iClump] = cl[key]
else:
usemask[iClump] = False
# ---------------------------------------
# Filter/return data
# ---------------------------------------
if clumpexist:
return outarray[usemask]
else:
return outarray
def __repr__(self):
printstr = "sim object. {0} clumps, {1} time steps".format(\
self.nClumps(), self.nt())
return printstr
def __str__(self):
return self.__repr__()
def _init_array(self, nClumps, nt=0, dim=0, dtype=float, units=None):
"""
Initialize a blank array for multi-clump slicing. Default fill values
are NaN for floats, -1 for ints, and 0 otherwise. This is useful
for automatically flagging when clumps do not exist
**ARGUMENTS**
nClumps : int
Number of clumps
nt : int
Number of time steps
dim : int
Dimension of the array. IE 3 for 'vel' or 1 for 'x'
dtype : dtype
dtype of the array
units : (see pynbody.units)
Units for the array
**RETURNS**
An array of a shape suitable for nClumps, nt, and dim
"""
shape = [nClumps]
if nt > 1:
shape.append(nt)
if dim > 1:
shape.append(dim)
if np.issubdtype(dtype, float):
fill_val = np.nan
elif np.issubdtype(dtype, int):
fill_val = -1
else:
fill_val = 0
outarray = SimArray(fill_val*np.ones(shape, dtype=dtype), units)
return outarray
def keys(self):
"""
Return keys present in all clumps
"""
return _keys(self)
def nClumps(self):
"""
Returns the number of clumps in the simulation
"""
return len(self)
def nt(self):
"""
Returns the number of timesteps in the simulation
"""
if self.nClumps() > 0:
return len(self[0])
else:
return 0
class simgroup(np.ndarray):
"""
The simgroup class. Basically just an array containing a bunch of sim
objects. Meant to contain information for clumps in a suite of simulations
To initialize a blank sim group:
>>> sgrp = simgroup(nSims)
To initialize from a list of simulations (ie a list of the outputs from
:func:`clumps.clumptracker`):
>>> sgrp = newSimgroup(simlist)
Accessing different quantites:
>>> s = newSimgroup(simlist) # create sim object from simlist
>>> x = s('x', t=slice(None)) # get x for all clumps at all times
# Get the mass for the first time step at which the clumps exist
>>> m0 = s('mass',t=0,clumpexist=True)
# Get velocity for the final timestep at which a clump exists
>>> vf = s('vel', t=-1, clumpexist=True)
# Get masses of all clumps at end of simulation
>>> all_masses = s('mass', t=-1)
>>> mask = ~np.isnan(all_masses)
>>> mf = all_masses[mask]
Print available keys:
>>> s.keys()
Select a sub-group of simulations by normal array slicing:
>>> s1 = s[0:10]
**CALL FUNCTION ARGUMENTS**
s(key, t=None, clumpexist=False, clump=None, sims=None)
key : str
Clump key to access
t : slicer
Anything that can be used to slice a numpy array, eg [1,2,4] or
slice(0,-1). Slices according to the simulation time
clumpexist : bool
Filter out all quantities by whether the clump exists at that time.
e.g., this makes it easy to select the first timestep when a clump
exists
clump : slicer
Anything that can be used to slice a numpy array, eg [1,2,4] or
slice(0,-1). Slices by clump number
sims : slicer
Anything that can be used to slice a numpy array, eg [1,2,4] or
slice(0,-1). Slices by simulation number
"""
def __new__(subtype, shape, buffer=None, offset=0, \
strides=None, order=None):
dtype = sim
obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, \
strides, order)
return obj
def __array_finalize__(self, obj):
return
def __call__(self, key, t=None, clumpexist=False, clump=None, sims=None):
"""
Call documentation for simgroups
"""
# Slice the simulation group is needed
simgrp = _simgroupslice(self, sims)
# No simulations were selected
if len(simgrp) < 1:
return None
# Loop through all the simulations at generate the request arrays
outlist = []
for iSim, s in enumerate(simgrp):
if len(s) > 0:
# The simulation has a clump. Now generate the requested array
val = s(key,t,clumpexist,clump)
else:
# The simulation has no clumps
val = None
# Append output (if it's not None)
if val is not None:
if len(val) > 0:
outlist.append(val)
# Concatenate the list of output arrays into a single SimArray
outarray = arraylistcat(outlist)
return outarray
def __repr__(self):
printstr = 'simgroup object. {0} simulations, {1} clumps'.format(\
self.nsim(), self.nClumps())
return printstr
def __str__(self):
return self.__repr__()
def keys(self):
"""
Return keys present in all clumps
"""
return _keys(self)
def nsim(self):
"""
Return the number of simulations present here
"""
return len(self)
def nClumps(self):
n = 0
for simulation in self:
n += simulation.nClumps()
return n
def _keys(obj):
"""
Return the clump keys present in all things here
"""
k = []
if len(obj) > 0:
# There is at least one thing
for x in obj:
# Make sure keys() is defined
if hasattr(x, 'keys'):
k.extend(x.keys())
k = list(set(k))
k.sort()
return k
@pb.derived_array
def exists(sim):
"""
Defines a pynbody derived array which determines the time steps a clump
exists at
"""
return (sim['N'] > 0)
def len2(x):
"""
A modified version of len() to work with numbers. Numbers have a length
of 1
"""
if hasattr(x, '__len__'):
length = len(x)
elif isinstance(x, (int,float,long,complex)):
length = 1
return length
def arraylistcat(arraylist):
"""
Concatenate a list of like arrays (or SimArrays) into a single array.
Concatenates along the first dimension
Returns None for an empty list
"""
if len(arraylist) < 1:
return None
nx = 0
for x in arraylist:
nx += len(x)
dummy = arraylist[0]
shape = list(dummy.shape)
shape[0] = nx
units = _getunits(dummy)
outarray = SimArray(np.zeros(shape), units)
counter = 0
for array in arraylist:
outarray[counter:counter+len(array)] = array
counter += len(array)
return outarray
def _simslice(simulation, clump=None):
"""
A method for slicing a sim, guaranteeing a sim object is returned.
clump is anything that can be used to slice an array
"""
if clump is None:
# No slicing required
s = simulation
else:
# Slice according to clump
s = simulation[clump]
if not isinstance(s, sim):
# Cast s as a sim object
dummy = sim(1)
dummy[0] = s
s = dummy
return s
def _simgroupslice(simgrp, sims=None):
"""
A method for slicing a simgroup, guaranteeing a simgroup object is returned.
sims is anything that can be used to slice an array
"""
if sims is None:
# No slicing required
s = simgrp
else:
# Slice according to sims
s = simgrp[sims]
if not isinstance(s, simgroup):
# Cast s as a sim object
dummy = simgroup(1)
dummy[0] = s
s = dummy
return s
def _getunits(x):
"""
    Attempts to get the units of x. If x has no units, None is returned
"""
if pb.units.has_units(x):
units = x.units
else:
units = None
return units
def _getdim(x):
"""
Get the dimension of an array x. IE, for 'vel' dim=3, for 'z', dim=1
For x shape (N,) dim = 0
For x shape (N,m) dim = m
For x shape (N1, N2, ...,m) dim = m
"""
if np.ndim(x) > 1:
dim = x.shape[-1]
else:
dim = 0
return dim
def _getnt(simulation, t=None):
"""
Get the total number of time steps the slicer t will create for a simulation
"""
nt_sim = simulation.nt()
if t is not None:
dummy = np.zeros(nt_sim)
nt = len2(dummy[t])
else:
nt = nt_sim
return nt | UTF-8 | Python | false | false | 17,119 | py | 35 | _simclumps.py | 33 | 0.491734 | 0.484841 | 0 | 661 | 24.900151 | 80 |
myracheng/treehacks19 | 17,549,236,408,152 | 14136fe5c094bccac9bb60496b08ceaab45c8685 | af4c325a5a20cb054f22723af9a693cdf8cda3e5 | /mysite/env/lib/python3.6/io.py | c3e531188f9c386d17f05fb666c54deb8ae9d757 | []
| no_license | https://github.com/myracheng/treehacks19 | aff1d6356f480dfdc4ca75f286fbcbd29c110a35 | 9e2d9195f749415eddcfabaceed0f9d911b12c7e | refs/heads/master | 2020-07-04T11:07:02.833157 | 2019-02-17T19:24:08 | 2019-02-17T19:24:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /Users/gracelu/anaconda3/lib/python3.6/io.py | UTF-8 | Python | true | false | 44 | py | 324 | io.py | 205 | 0.818182 | 0.75 | 0 | 1 | 44 | 44 |
ravikrranjan/learning-python | 13,194,139,574,102 | a409c32fcd83e4afe42ace8c7a3cae07bfbb047e | 1caff4eaf08d96c7a6f594499ec8664fc1b8cfe8 | /while_loop.py | 634945566842fa44487282a24dcf4ab08294fdb5 | []
| no_license | https://github.com/ravikrranjan/learning-python | f94598454e0a58a6fcce79fb3eea75402e2eac4a | 17d7628f12cec87913d79914d5c8b40b409bd371 | refs/heads/master | 2021-05-17T02:19:54.391310 | 2020-05-19T16:48:09 | 2020-05-19T16:48:09 | 250,573,197 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
number = 8
guess = int(input("I'm thinking a number between zero to ten, Can I gusess ? "))
while True:
if guess == number:
break
else:
guess = int(input('Nope, Try agian :'))
print("You gussed it, I was thinking about", number)
| UTF-8 | Python | false | false | 257 | py | 38 | while_loop.py | 34 | 0.614786 | 0.610895 | 0 | 10 | 24.5 | 80 |
akumar01/uoicorr | 8,254,927,176,621 | d1ea846d44a15a9899e50e052a3f2350e2f4208f | 30ebe031cc175c544f261333ee393bafa48ddef6 | /uoicorr_exp4.py | 9db85ed77ebcb5f47a22d8eda0b3d64ecf618345 | []
| no_license | https://github.com/akumar01/uoicorr | 7709cc8e62341c6b2c216f344f4279f4df7fde3e | b2b62dab3284f7bb5740131704e9331226328097 | refs/heads/master | 2020-04-08T13:06:08.288014 | 2019-05-01T23:06:04 | 2019-05-01T23:06:04 | 159,375,575 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import numpy as np
import h5py
import time
from scipy.linalg import block_diag
from sklearn.metrics import r2_score
from sklearn.linear_model import ElasticNetCV
from PyUoI.UoI_Lasso import UoI_Lasso
### parse arguments ###
parser = argparse.ArgumentParser()
parser.add_argument('--n_features', type=int, default=20)
parser.add_argument('--kappa', type=float, default=0.3)
parser.add_argument('--reps', type=int, default=50)
parser.add_argument('--sparsity', type=float, default=1.)
parser.add_argument('--LST', type=float, default=0.75)
parser.add_argument('--results_file', default='results.h5')
args = parser.parse_args()
# number of features
n_features = args.n_features
# inverse signal-to-noise ratio
kappa = args.kappa
# sparsity of within block features
sparsity = args.sparsity
# number of repetitions
reps = args.reps
# lower selection threshold
LST = args.LST
# filename for results
results_file = args.results_file
# set up other variables
n_samples = 5 * n_features # number of training samples
n_nonzero_beta = int(sparsity * n_features) # number of nonzero parameters
# correlations and selection thresholds
Ls = np.array([0.25, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])
# set up results file
results = h5py.File(results_file, 'w')
# result arrays: fits
betas = np.zeros((reps, n_features))
beta_hats_uoi = np.zeros((reps, Ls.size, n_features))
beta_hats_enet = np.zeros((reps, Ls.size, n_features))
# result arrays: explained variance performance
r2_uoi = np.zeros((reps, Ls.size))
r2_enet = np.zeros((reps, Ls.size))
r2_true = np.zeros((reps, Ls.size))
for rep in range(reps):
# choose true parameters
beta = np.random.uniform(low=0, high=10, size=(n_features, 1))
# determine sparsity indicies
mask = np.zeros(n_features)
mask[:n_nonzero_beta] = np.ones(n_nonzero_beta)
np.random.shuffle(mask)
# apply mask to parameters to set them equal to zero
beta = beta * mask[..., np.newaxis]
betas[rep, :] = beta.ravel()
# iterate over correlation strengths
for L_idx, L in enumerate(Ls):
# create covariance matrix for block
indices = np.arange(n_features)
distances = np.abs(np.subtract.outer(indices, indices))
Sigma = np.exp(-distances/L)
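        # Exponentially decaying correlations: Sigma[i, j] = exp(-|i - j| / L),
        # so larger L means stronger, longer-range correlations between features.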
# draw samples
X = np.random.multivariate_normal(mean=np.zeros(n_features), cov=Sigma, size=n_samples)
X_test = np.random.multivariate_normal(mean=np.zeros(n_features), cov=Sigma, size=n_samples)
# signal and noise variance
signal_variance = np.sum(Sigma * np.dot(beta, beta.T))
noise_variance = kappa * signal_variance
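        # Signal variance is Var(X @ beta) = beta^T Sigma beta (written here as an
        # elementwise sum); kappa is the inverse signal-to-noise ratio, so the
        # noise variance scales with the signal variance.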
# draw noise
noise = np.random.normal(loc=0, scale=np.sqrt(noise_variance), size=(n_samples, 1))
noise_test = np.random.normal(loc=0, scale=np.sqrt(noise_variance), size=(n_samples, 1))
# response variable
y = np.dot(X, beta) + noise
y_test = np.dot(X_test, beta) + noise_test
# apply uoi lasso
start = time.time()
uoi = UoI_Lasso(
normalize=True,
n_boots_sel=48,
n_boots_est=48,
selection_thres_min=LST,
n_selection_thres=48,
estimation_score='BIC'
)
uoi.fit(X, y.ravel())
beta_hat_uoi = uoi.coef_
print('uoi: ', time.time() - start)
start = time.time()
# apply elastic net
enet = ElasticNetCV(
l1_ratio=[0.01, .1, .5, .7, .9, .95, .99, 1],
normalize=True,
tol=1e-7,
max_iter = 100000
)
enet.fit(X, y.ravel())
beta_hat_enet = enet.coef_
print('enet: ', time.time() - start)
# store fits
beta_hats_uoi[rep, L_idx, :] = beta_hat_uoi
beta_hats_enet[rep, L_idx, :] = beta_hat_enet
# calculate test performance
r2_uoi[rep, L_idx] = r2_score(
y_test, np.dot(X_test, beta_hat_uoi)
)
r2_enet[rep, L_idx] = r2_score(
y_test, np.dot(X_test, beta_hat_enet)
)
r2_true[rep, L_idx] = r2_score(
y_test, np.dot(X_test, beta)
)
# store results in h5 file
results['betas'] = betas
results['beta_hats_uoi'] = beta_hats_uoi
results['beta_hats_enet'] = beta_hats_enet
results['r2_uoi'] = r2_uoi
results['r2_enet'] = r2_enet
results['r2_true'] = r2_true
results.close() | UTF-8 | Python | false | false | 3,992 | py | 77 | uoicorr_exp4.py | 35 | 0.687625 | 0.664579 | 0 | 126 | 30.690476 | 94 |
asteen27/netbox-bgp | 12,360,915,880,310 | 42ef12455beb3e3c5544f5e26d1936b730bfe18d | 2055463aab9d8272cfcfa3b6831b77e9b4893cd4 | /netbox_bgp/migrations/0009_netbox_bgp.py | 83f70cb73972f71b016566dba1c237772f1e8eda | [
"Apache-2.0"
]
| permissive | https://github.com/asteen27/netbox-bgp | b22421f9f0c7e90e2979c95897981ad4a2034261 | 8ee7a80484fe008408bbdc83a3da4bf2f2e50c8b | refs/heads/main | 2023-07-12T22:13:36.617871 | 2021-08-16T18:41:55 | 2021-08-16T18:41:55 | 396,925,486 | 0 | 0 | Apache-2.0 | true | 2021-08-16T18:40:42 | 2021-08-16T18:40:42 | 2021-08-13T17:51:29 | 2021-07-16T06:40:50 | 2,889 | 0 | 0 | 0 | null | false | false | # Generated by Django 3.1.3 on 2021-04-21 05:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('netbox_bgp', '0008_netbox_bgp'),
]
operations = [
migrations.AlterField(
model_name='bgpsession',
name='peer_group',
field=models.ManyToManyField(blank=True, to='netbox_bgp.BGPPeerGroup'),
),
]
| UTF-8 | Python | false | false | 422 | py | 24 | 0009_netbox_bgp.py | 23 | 0.604265 | 0.559242 | 0 | 18 | 22.444444 | 83 |
mcdaviddj/eyeExtend-Connect | 6,691,559,053,529 | 7d3bdffbe6fef3638631de9096e0ee38ff40a8c7 | e37acaf3732f937c2f9be7c26a2a3ce58ef0d962 | /AzureAD/AzureAD 1.0.0/aad_test.py | b5128ceb2e3c9bb4812808da3d842e6c8a974622 | [
"MIT"
]
| permissive | https://github.com/mcdaviddj/eyeExtend-Connect | 3d76546841a90a5390fa76d5a2148377226dbb3a | e1638210dda134ac481e763cc7c5b50241180089 | refs/heads/encode_session_id | 2023-08-01T05:58:31.313912 | 2021-09-28T16:33:23 | 2021-09-28T16:33:23 | 411,360,942 | 1 | 0 | null | false | 2021-09-28T16:37:33 | 2021-09-28T16:34:42 | 2021-09-28T16:35:28 | 2021-09-28T16:35:35 | 0 | 0 | 0 | 1 | Python | false | false | '''
Copyright © 2020 Forescout Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# v1.0.0 Azure Active Directory Test
# Keith Gilbert
from urllib import request, parse
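# NOTE: names such as params, ssl_context, logging and json are not defined in
# this file; they are expected to be injected into the script's namespace by the
# hosting Forescout eyeExtend Connect framework when this test action runs.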
logging.info("****************************** params: {}".format(params))
# Values for system.conf passed to params
response = {}
if ("connect_authorization_token" in params) and params["connect_authorization_token"] != "":
access_token = params["connect_authorization_token"]
if "connect_azuread_test_user" in params and params["connect_azuread_test_user"] != "":
test_user = params["connect_azuread_test_user"]
user_url_start = "https://graph.microsoft.com/v1.0/users?$filter=startswith(userPrincipalName%2C+\'"
user_url_end = "\')"
user_url = user_url_start + test_user + user_url_end
user_header = {"Authorization": "Bearer " + str(access_token)}
req2 = request.Request(user_url, headers=user_header)
resp2 = request.urlopen(req2, context=ssl_context)
req2_response = json.loads(resp2.read())
logging.info("******************************req2_response = {}".format(str(req2_response)))
if resp2.getcode() == 200 and req2_response["value"]:
logging.info("****************************** resp2.getcode() == 200")
response["succeeded"] = True
response["result_msg"] = "Successful connection : Test User = " + test_user + " >> " + str(req2_response["value"])
else:
response["succeeded"] = True
response["result_msg"] = "Successful connection : Test User doesn't exist"
else:
response["succeeded"] = True
response["result_msg"] = "Successful connection : Test User not defined"
else:
response["succeeded"] = False
response["result_msg"] = "Failed connection : couldn't connect to login.microsoftonline server." | UTF-8 | Python | false | false | 2,720 | py | 236 | aad_test.py | 147 | 0.718279 | 0.708349 | 0 | 54 | 49.37037 | 117 |
CellarDoorMedia/Lockr-CA | 8,005,819,086,899 | f4451e8744396d4d358c9490d079f21a682a44ac | d801b8de0e905ed668c49dd18022f705bf6a4032 | /script/uuid | 55327b037f8ea5411ea6512fef58b234a564d4cd | []
| no_license | https://github.com/CellarDoorMedia/Lockr-CA | 3c50ed383e4ef38c062669a8a8f85499331de1d5 | 5dfc2067ba01fd37a67c3875dd46694bd4235428 | refs/heads/master | 2021-01-24T23:57:19.890919 | 2016-05-19T21:10:21 | 2016-05-19T21:10:21 | 59,155,939 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
import uuid
print str(uuid.uuid4())
| UTF-8 | Python | false | false | 62 | 5 | uuid | 3 | 0.693548 | 0.66129 | 0 | 5 | 11.2 | 23 |
|
wsljmlin/spider | 3,831,110,868,976 | 05fec7cc8efb429f3fe49221afb3d5882549b88d | 5b9e7e8ff630bf73a30b80a452b2753f9b1b1904 | /spiderManager/route/spider.py | 75a720cb829e7c2afb51d299c1d976dc485ba20f | []
| no_license | https://github.com/wsljmlin/spider | b6fe9727de6c39145bae7e11349a5f98f057f5d0 | 63e1e938050b9399c4363f4a54d8f15d56e6368d | refs/heads/master | 2021-01-22T20:00:22.165610 | 2017-03-17T05:33:37 | 2017-03-17T05:33:37 | 85,274,543 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
#encoding=UTF-8
import sys
import os
import time
import re
import types
import json
from bs4 import BeautifulSoup
from flask import Blueprint
from flask import render_template
from flask import request
from model.loginVerify import requireLogin
from flask_wtf import Form
from wtforms import form
from wtforms import TextField, StringField, SelectField, SubmitField, PasswordField, validators
from wtforms.validators import DataRequired
from model.spiderModel import spiderModel
from model.spiderCron import spiderCron
from model.mysqldb import db
from spiderModel.spiderTool import spiderTool
from config import spiderModelPath
from config import runtimeSave
sys.path.append(spiderModelPath)
class sourceForm(Form):
source = SelectField(u'视频源:', validators=[validators.Length(min=1, max=25)])
type = SelectField(u'视频分类:', validators=[validators.Length(min=1, max=25)])
spiderModel = SelectField(u'爬虫模块:', validators=[validators.Length(min=1, max=25)])
example = SelectField(u'示例种子:')
url = StringField(u'播放地址:')
submitButton = SubmitField(u'开始爬取')
class cronForm(Form):
cron_id = StringField(u'定时id:', validators=[validators.Length(min=1, max=25)])
name = StringField(u'节目名称:', validators=[validators.Length(min=1, max=25)])
url = StringField(u'种子url:', validators=[validators.Length(min=1, max=25)])
spider = StringField(u'爬虫模块:', validators=[validators.Length(min=1, max=25)])
runTime = StringField(u'执行时间:')
submitButton = SubmitField(u'保存')
class mfspForm(Form):
type = SelectField(u'视频分类:',validators=[validators.Length(min=1, max=25)])
word = StringField(u'搜索内容:',validators=[validators.Length(min=1, max=25)])
submitButton = SubmitField(u'开始搜索')
bp = Blueprint('spider', __name__)
@bp.route('/')
@requireLogin
def spider():
return render_template('spider/spider.html')
@bp.route('/cron')
@requireLogin
def all():
totalPage =1
return render_template('spider/spiderCron.html',totalPage=totalPage, runtimeSave=runtimeSave)
@bp.route('/find',methods=['GET', 'POST'])
@requireLogin
def find():
getKeys = request.args.keys()
name = "%%"
pageNum = 1
totalPage = 1
pageListNum = 20
if "nameGet" in getKeys:
name = "%" + request.args.get("nameGet") + "%"
if "pageNum" in getKeys:
pageNum = int(request.args.get("pageNum").encode('utf8'))
SpiderCron = spiderCron()
data =[{"cron_id": u.id, "name": u.name, "url": u.url, "spider": u.spider, "runTime": {u.runTime: runtimeSave[int(str(u.runTime).encode('utf8'))]}}
for u in db.session.query(spiderCron).filter(spiderCron.name.ilike(name))]
totalPage = len(data)//pageListNum
if len(data) % pageListNum > 0:
totalPage += 1
if int(pageNum) > totalPage:
pageNum = 1
pageData = []
beginIndex = pageListNum*(pageNum-1)
endIndex = pageListNum*pageNum
if endIndex > len(data):
endIndex = len(data)
for i in range(beginIndex, endIndex):
pageData.append(data[i])
rtData = {"totalPage": totalPage, "pageNum": pageNum, "data": pageData}
rtStr = json.dumps(rtData)
return rtStr
@bp.route('/album',methods=['GET', 'POST'])
@requireLogin
def album():
jsonData = ""
nameSave = ""
urlSave = ""
spiderSave = ""
form = sourceForm()
form.source.choices = [(u.source, u.source) for u in db.session.query(spiderModel.source).group_by(spiderModel.source)]
form.type.choices = [(u.type, u.type) for u in db.session.query(spiderModel.type).group_by(spiderModel.type)]
form.spiderModel.choices = [(u.spider, u.spider) for u in db.session.query(spiderModel.spider).group_by(spiderModel.spider)]
form.example.choices = [(u.url, u.url) for u in db.session.query(spiderModel.url).group_by(spiderModel.url)]
form.submitButton = u'开始爬取'
if form.validate_on_submit():
requestForm = request.form
keyList = requestForm.keys()
source = ""
type = ""
spiderF = ""
example = ""
submitButton = ""
url = ""
if "source" in keyList:
source = requestForm["source"]
if "type" in keyList:
type = requestForm["type"]
if "spiderModel" in keyList:
spiderModelName = requestForm["spiderModel"]
if "example" in keyList:
example = requestForm["example"]
if 'submitButton' in keyList:
submitButton = requestForm['submitButton']
if 'url' in keyList:
url = requestForm['url']
if submitButton == '1' and source != "":
form.type.choices = [(u.type, u.type) for u in db.session.query(spiderModel.type).filter(spiderModel.source == source).group_by(spiderModel.type)]
form.spiderModel.choices = [(u.spider, u.spider) for u in db.session.query(spiderModel.spider).filter(spiderModel.source == source).group_by(spiderModel.spider)]
elif submitButton == '2' and type != "" and source != "":
form.spiderModel.choices = [(u.spider, u.spider) for u in db.session.query(spiderModel.spider).filter(spiderModel.source == source, spiderModel.type == type).group_by(spiderModel.spider)]
if len(form.spiderModel.choices) == 1:
form.example.choices = [(u.url, u.url) for u in db.session.query(spiderModel.url).filter(spiderModel.source == source, spiderModel.type == type).group_by(spiderModel.url)]
elif submitButton == '3'and spiderModelName != "" and type != "" and source != "":
form.spiderModel.choices = [(spiderModelName, spiderModelName) ]
form.type.choices = [(u.type, u.type) for u in db.session.query(spiderModel.type).filter(spiderModel.spider == spiderModelName).group_by(spiderModel.type)]
form.example.choices = [(u.url, u.url) for u in db.session.query(spiderModel.url).filter(spiderModel.spider == spiderModelName).group_by(spiderModel.url)]
elif submitButton != '1' and submitButton != '2' and url != "":
spider_class = ""
modelList = os.listdir(spiderModelPath)
if spiderModelName + ".py" not in modelList:
jsonData = u"{'msg':'该爬取模块不存在!'}"
else:
try:
spider_module = __import__(spiderModelName)
spider_class = eval("spider_module." + spiderModelName + "()")
                except Exception:
jsonData = u"{'msg':'该爬取模块无法初始化!'}"
if spider_class != "":
try:
spider_class.seedList = [url]
spider_class.seedSpider()
jsonData = spider_class.jsonData
urlSave = url
spiderSave = spiderModelName
nameSave = spider_class.program['name']
if jsonData == "":
jsonData = u"{'msg':'爬取失败,请选用其他模块,或联系工程师!'}"
                    except Exception:
jsonData = u"{'msg':'爬取失败,请选用其他模块,或联系工程师!'}"
elif submitButton != '1' and submitButton != '2' and url == "":
jsonData = u"{'msg':'url不能为空!'}"
return render_template('spider/spiderTmp.html', form=form, jsonData=jsonData, urlSave=urlSave, nameSave=nameSave, spiderSave=spiderSave, runtimeSave=runtimeSave)
@bp.route('/shangchuan', methods=['GET', 'POST'])
@requireLogin
def shangchuan():
jsonData = ""
postKeys = request.form.keys()
if 'jsonData' in postKeys:
jsonData = request.form['jsonData']
if type(jsonData) == types.UnicodeType:
jsonData = jsonData.encode('utf8')
if jsonData != "":
fileName = 'data' + os.path.sep + time.strftime('cron_manager_spider_tmp_%Y%m%d%H%M%S.txt', time.localtime(time.time()))
        with open(fileName, 'w') as openFile:
            openFile.write(str(jsonData))
jsonData = u"{'msg':'数据已经上传,请耐心等待!'}"
return jsonData
jsonData = u"{'msg':'上传数据异常!'}"
return jsonData
@bp.route('/save', methods=['GET', 'POST'])
@requireLogin
def save():
postKeys = request.form.keys()
id = ""
name = ""
url = ""
spiderF = ""
runtime = ""
delete = False
if "idSave" in postKeys:
id = request.form["idSave"]
if "delete" in postKeys:
delete = True
if id != "":
SpiderCron = db.session.query(spiderCron).filter(spiderCron.id == id).first()
else:
SpiderCron = spiderCron()
if "nameSave" in postKeys:
name = request.form["nameSave"]
SpiderCron.name = name
if "urlSave" in postKeys:
url = request.form["urlSave"]
SpiderCron.url = url
if "spiderSave" in postKeys:
spiderF = request.form["spiderSave"]
SpiderCron.spider = spiderF
if "runtimeSave" in postKeys:
runtime = request.form["runtimeSave"]
SpiderCron.runTime = runtime
if id == "":
db.session.add(SpiderCron)
elif id != "" and delete:
db.session.delete(SpiderCron)
db.session.commit()
return "Data base connected."
@bp.route('/cronData', methods=['GET', 'POST'])
def cronData():
getKeys = request.args.keys()
if 'runTime' in getKeys:
runTime = request.args['runTime']
listData = [{'spider': u.spider, 'url': u.url} for u in db.session.query(spiderCron).filter(spiderCron.runTime == str(runTime))]
strData = json.dumps(listData)
return strData
else:
return ""
@bp.route('/mfsp',methods=['GET', 'POST'])
@requireLogin
def mfsp():
form = mfspForm()
form.type.choices=[('1', u'电影'),('2', u'电视剧'),('5', u'动漫'),('4', u'综艺'), ( '9',u'纪录片')]
data = []
if form.validate_on_submit():
requestForm = request.form
keyList = requestForm.keys()
word = ""
type = ""
if "word" in keyList:
word = requestForm["word"]
if "type" in keyList:
type = requestForm["type"]
if word != "" and type != "":
seed = "http://www.beevideo.tv/api/video2.0/video_search.action?channelId=%s&searchKey=%s" %(type, word)
doc = spiderTool.getHtmlBody(seed)
soup = BeautifulSoup(doc, from_encoding="utf8")
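            # The BeeVideo search API returns XML; each <video_item> is mapped to
            # a name/category plus a detail-info URL built from its <id>.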
video_list_p = soup.find('video_list')
if video_list_p is not None:
video_list = video_list_p.find_all('video_item')
for each in video_list:
item = {}
url = ""
name = ""
type = ""
id = each.find('id')
if id is not None:
id_num = id.get_text()
url = 'http://www.beevideo.tv/api/video2.0/video_detail_info.action?videoId=' + id_num
name_tag = each.find('name')
if name_tag:
name = name_tag.get_text()
type_tag = each.find('channel')
if type_tag:
type = type_tag.get_text()
if name != "" and type != "" and url != "":
item = {"name":name, "type": type, "url": url}
data.append(item)
return render_template('spider/mfsp.html',form=form, data=data) | UTF-8 | Python | false | false | 11,859 | py | 130 | spider.py | 65 | 0.584367 | 0.579249 | 0 | 282 | 38.882979 | 199 |
nestorwheelock/stripe-python | 5,437,428,615,225 | f83ba02da02e8dcbaedf2e116cb424263e049e09 | 1b0fb627d3412dc8dedb6246e5d71ea30831f737 | /tests/api_resources/test_invoice.py | aebf317cbe406b4eecfe7cf348d35f9ec0cf28a4 | [
"MIT"
]
| permissive | https://github.com/nestorwheelock/stripe-python | ee00f8b01d325b935ef548fbd8a536900cd808c9 | 2be8e47a1e24f1394038d478b7e17b3962bb1b92 | refs/heads/master | 2020-03-22T00:33:22.087701 | 2018-06-29T09:50:33 | 2018-06-29T09:50:33 | 139,252,114 | 1 | 0 | null | true | 2018-06-30T13:34:21 | 2018-06-30T13:34:21 | 2018-06-30T01:49:43 | 2018-06-29T11:59:20 | 1,672 | 0 | 0 | 0 | null | false | null | from __future__ import absolute_import, division, print_function
import stripe
from tests.helper import StripeTestCase
TEST_RESOURCE_ID = 'in_123'
class InvoiceTest(StripeTestCase):
def test_is_listable(self):
resources = stripe.Invoice.list()
self.assert_requested(
'get',
'/v1/invoices'
)
self.assertIsInstance(resources.data, list)
self.assertIsInstance(resources.data[0], stripe.Invoice)
def test_is_retrievable(self):
resource = stripe.Invoice.retrieve(TEST_RESOURCE_ID)
self.assert_requested(
'get',
'/v1/invoices/%s' % TEST_RESOURCE_ID
)
self.assertIsInstance(resource, stripe.Invoice)
def test_is_creatable(self):
resource = stripe.Invoice.create(
customer='cus_123'
)
self.assert_requested(
'post',
'/v1/invoices'
)
self.assertIsInstance(resource, stripe.Invoice)
def test_is_saveable(self):
resource = stripe.Invoice.retrieve(TEST_RESOURCE_ID)
resource.metadata['key'] = 'value'
resource.save()
self.assert_requested(
'post',
'/v1/invoices/%s' % resource.id
)
def test_is_modifiable(self):
resource = stripe.Invoice.modify(
TEST_RESOURCE_ID,
metadata={'key': 'value'}
)
self.assert_requested(
'post',
'/v1/invoices/%s' % TEST_RESOURCE_ID
)
self.assertIsInstance(resource, stripe.Invoice)
def test_can_pay(self):
resource = stripe.Invoice.retrieve(TEST_RESOURCE_ID)
resource = resource.pay()
self.assert_requested(
'post',
'/v1/invoices/%s/pay' % resource.id
)
self.assertIsInstance(resource, stripe.Invoice)
def test_can_upcoming(self):
resource = stripe.Invoice.upcoming()
self.assert_requested(
'get',
'/v1/invoices/upcoming'
)
self.assertIsInstance(resource, stripe.Invoice)
def test_can_upcoming_and_subscription_items(self):
resource = stripe.Invoice.upcoming(
subscription_items=[
{"plan": "foo", "quantity": 3}
]
)
self.assert_requested(
'get',
'/v1/invoices/upcoming',
{
'subscription_items': {
"0": {
"plan": "foo",
"quantity": 3,
},
},
},
)
self.assertIsInstance(resource, stripe.Invoice)
| UTF-8 | Python | false | false | 2,669 | py | 37 | test_invoice.py | 35 | 0.536905 | 0.530161 | 0 | 93 | 27.698925 | 64 |
chenbiningbo/programming_learning | 19,207,093,759,066 | 0fae41a0235ed99eb360c0e59f6d3cb82d3c38f7 | d42a65c149b004228fd72586a0f79b17b0545e92 | /MIT_learning/session5_dataanlysis_panda/Problem/problem5_cus_demo.py | 87bcc29738347533f5cbd3fc01f9d5b85cec2ef7 | []
| no_license | https://github.com/chenbiningbo/programming_learning | 34a642823c6bc8d54c4b912fa51a4ad6e9f23c15 | 2cfb2ca509d7d1ccc4779be4d7a35625010cb98a | refs/heads/master | 2020-12-27T12:51:05.640281 | 2020-02-18T14:02:40 | 2020-02-18T14:02:40 | 237,903,158 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Problem 5
'''
HSBD Bank has hired Mario, a data analyst, to do some data analysis to help the company
to better understand the customers' demographic characteristics. The upper management
has some questions, and Mario must find the answer by analyzing the data available.
To start, please run the cell below to get the data provided by the bank (it might take a few seconds to finish running).
'''
# Run this cell but DO NOT MODIFY
# Do not modify this part, it belongs to the question
#------------------------------------------------------------------------------------------------------------------
import pandas as pd
url='https://raw.githubusercontent.com/juliencohensolal/BankMarketing/master/rawData/bank-additional-full.csv'
data = pd.read_csv(url,sep=";") # use sep="," for coma separation.
Age = data['age']
data = data[['age', 'job', 'marital']].copy()
#------------------------------------------------------------------------------------------------------------------
data.to_csv("../Problem/pro_data.csv")
'''
The series Age (defined in the cell above) contains information about the customers' age.
Write a script that prints the customers' age every 5,000th customers.
This means printing the value associated to the 5,000th element in
the series, the 10,000th element, the 15,000th element, and so on.
'''
# Write your script for 5.1 here
total_count = Age.count()
i=5000
while i < total_count:
print('The age of the '+str(i)+' th customer is '+str(Age.iloc[i]))
i+=5000
#5.2
# Print the name of all the columns in the dataframe
print(data.columns.values)
# Print the shape of the dataframe to understand the size of data
print(data.shape)
# In one line of code, print the average, the median, the maximum and the minimum age of the customers.
print('average Age:'+str(Age.mean())+' median Age: '+str(Age.median())+' max Age: '+str(Age.max())+'min Age:'+str(Age.min()))
#5.3
# Create a new column called normalized_age that contains the age of each customer divided by the maximum age.
max_age = Age.max()
data['normalized_age'] = data.apply(lambda x : x.age/max_age, axis=1)
# Print the job information for all customers between index 500 and index 515 (including 500 and 515).
for i in range(500,516):
customer = data.iloc[i]
print(str(i)+' '+customer.job)
# Delete the marital column from the dataframe
data6 = data.drop(columns=['marital'])
print(data6)
# Save the dataframe as an excel file to your local computer and name it Problem5.
# FIX_ME
data6.to_csv('../Problem/Problem5.csv') | UTF-8 | Python | false | false | 2,535 | py | 93 | problem5_cus_demo.py | 66 | 0.663511 | 0.641026 | 0 | 59 | 41.983051 | 125 |
jpinzonc/Senators_votes | 16,131,897,209,217 | 2d5ac1aafd5106b3e98cbf5ec0c3d3276ed62278 | 4117b0d34ef77835367b2770a14c3b4c36ec4bba | /map_app/senate_env/lib/python3.6/codecs.py | ae5990c6674c2dda50c20c778f0c34cd21531ea1 | []
| no_license | https://github.com/jpinzonc/Senators_votes | e10d7f00a07edf84295152e607cd048f27556b71 | 4d62a92ad0327443df408b43c4676b6e9ca7ca46 | refs/heads/master | 2022-07-10T11:01:14.439674 | 2019-05-25T01:24:50 | 2019-05-25T01:24:50 | 97,988,403 | 0 | 0 | null | false | 2022-06-21T21:12:52 | 2017-07-21T21:37:17 | 2019-05-25T01:24:57 | 2022-06-21T21:12:51 | 40,702 | 0 | 0 | 6 | Python | false | false | /Users/jpinzon/anaconda3/lib/python3.6/codecs.py | UTF-8 | Python | false | false | 48 | py | 129 | codecs.py | 53 | 0.833333 | 0.770833 | 0 | 1 | 48 | 48 |
snehananth/bookstore | 8,031,588,892,159 | 9f6c25a0175c41ca21068c1efae4ededb2687fc5 | 28c5a2b3734ed2bf66da598a0bd6343b2b5a0d5e | /book/urls.py | 40b286489f45f3cd71bad147b63e10800338cbdc | []
| no_license | https://github.com/snehananth/bookstore | 703095bc41b9101174e6d5b39b9d2f45a4535337 | 6c6992c97925f7c182754700287e21d31e0ca954 | refs/heads/master | 2021-08-08T15:07:37.208864 | 2017-11-10T15:18:40 | 2017-11-10T15:18:40 | 110,231,419 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from .views import BookDetail,BookListView
urlpatterns = [
url(r'^book/$', BookListView.as_view()),
url(r'^bookupdate/(?P<id>[0-9]+)/$', BookDetail.as_view()),
] | UTF-8 | Python | false | false | 203 | py | 15 | urls.py | 15 | 0.669951 | 0.660099 | 0 | 7 | 28.142857 | 63 |
active-sludge/TodoLoo | 14,877,766,716,685 | f60b27a78ca7f55f115e1d213ee90c891d1bde48 | b251ab344b3e3ba81ffb44b4a14992257ff370bb | /todo/tests.py | b244f6b8171f62c44a58b2ef3d2c802c0c2e7e43 | []
| no_license | https://github.com/active-sludge/TodoLoo | 62201b2082e43c61431d1abf0c0c04f5267dfec4 | 68800dc75019dc257640dd9f75fbfa2c708effea | refs/heads/main | 2023-08-23T07:52:01.034397 | 2021-10-12T13:59:30 | 2021-10-12T13:59:30 | 410,530,396 | 0 | 0 | null | false | 2021-10-12T13:57:54 | 2021-09-26T11:16:02 | 2021-10-12T13:52:11 | 2021-10-12T13:54:22 | 1,460 | 0 | 0 | 1 | JavaScript | false | false | from django.test import TestCase, Client
from django.contrib.auth.models import User
from todo.models import Article, Todo
from django.urls import reverse
c = Client()
class ViewTestsCase(TestCase):
def setUp(self):
self.credentials = {
'username': 'testuser',
'password': 'secret'}
User.objects.create_user(**self.credentials)
def test_home_page_accessed_successfully(self):
response = c.get('/')
self.assertEqual(response.status_code, 200)
def test_user_lands_on_home_after_login(self):
c.login(username='testuser', password='secret')
response = self.client.get(reverse('currenttodos'))
self.assertEqual(response.status_code, 302)
def test_view_url_exists_at_desired_location(self):
c.login(username='testuser', password='secret')
response = self.client.get(reverse('currenttodos'))
self.assertEqual(response.status_code, 302)
class LogInTestCase(TestCase):
def setUp(self):
self.credentials = {
'username': 'testuser',
'password': 'secret'}
User.objects.create_user(**self.credentials)
def test_login(self):
# send login data
response = self.client.post('/login/', self.credentials, follow=True)
# should be logged in now
self.assertTrue(response.context['user'].is_active)
class TodoTestCase(TestCase):
@classmethod
def setUpTestData(cls):
user = User.objects.create_user(username='testuser', password='12345')
Todo.objects.create(
title="Buy milk",
memo="Low fat if possible",
user=user
)
def test_todo_has_title(self):
todo = Todo.objects.get(id=1)
self.assertTrue(todo.title)
def test_todo_has_memo(self):
todo = Todo.objects.get(id=1)
self.assertTrue(todo.memo)
def test_todo_has_user(self):
todo = Todo.objects.get(id=1)
self.assertTrue(todo.user)
def test_update_todo_memo(self):
todo = Todo.objects.get(id=1)
todo.memo = "New Memo"
todo.save()
self.assertEqual(todo.memo, 'New Memo')
class ArticleTestCase(TestCase):
def setUp(self):
self.credentials = {
'username': 'testuser',
'password': 'secret'}
User.objects.create_user(**self.credentials)
c.post('/login/', self.credentials, follow=True)
Article.objects.create(
article_id="324212",
article_title="Example article Title",
article_abstract="Example article abstract that is a little bit longer",
author_list="Ben, Sen, O",
keyword_list="Aids, covid",
pub_date="01.02.2004 00:00:00"
)
def test_when_api_called_articles_are_saved(self):
response = c.get('refresh/')
articles_saved = Article.objects.exists()
self.assertTrue(response.status_code, 200)
self.assertTrue(articles_saved)
def test_bookmarked_article_becomes_todo(self):
response = c.get('/bookmark/324212/')
todo = Todo.objects.all()
        self.assertEqual(response.status_code, 200)
self.assertTrue(todo)
| UTF-8 | Python | false | false | 3,228 | py | 11 | tests.py | 7 | 0.61772 | 0.60223 | 0 | 108 | 28.888889 | 84 |
OpenITI/openiti | 16,415,365,026,211 | 2fc32f57df61f30be6925c27e4fc4655a98d2242 | 1b62a2c48dec8449bf146ec2a572bb58994873c5 | /build/lib/openiti/release/collect_openITI_version.py | a5d3ade23442fa95185b33985387fe962c340123 | [
"MIT"
]
| permissive | https://github.com/OpenITI/openiti | 32bac70f87e85623d9f3463edfbb32986fe7b33e | 5ebec640c634ef57569d7ef4e1dc0dd1b188dcb3 | refs/heads/master | 2023-03-15T21:26:56.600717 | 2023-03-08T12:00:02 | 2023-03-08T12:00:02 | 224,496,178 | 10 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copies all data from the XXXXAH directories to a single directory in order to publish a version of OpenITI.
import os
import shutil
import sys
import re
def copy_ah_data(source_dir, dest_dir):
    # All XXXXAH directories
ah_dirs = os.listdir(source_dir)
for ad in ah_dirs:
# Get the path to the "data" folder (in each XXXXAH dir)
data_path = os.path.join(source_dir, ad, "data")
# If the data directory exists (for cases the "data" dir is not available, like 1475AH)
if os.path.exists(data_path):
# Get the list of directories in "data" directory
cur_dirs = os.listdir(data_path)
# Copy the dirs in "/data"
for cpy_d in cur_dirs:
# Source is the path to the current folder in data folder (cpy_d) that will be copied,
# Target is a join of the target path (given as input) and the current folder (cpy_d)
shutil.copytree(os.path.join(data_path, cpy_d), os.path.join(dest_dir, cpy_d))
for root, dirs, files in os.walk(os.path.join(dest_dir, cpy_d)):
# texts = [f for f in files if
# os.path.isfile(os.path.join(dest_dir, cpy_d, f)) and
# re.search("^\d{4}\w+\.\w+\.\w+-\w{4}(\.(mARkdown|inProgress|completed))?$", f)]
# texts_noExt = set([re.split("\.(mARkdown|inProgress|completed)", t)[0] for t in texts])
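                    # Keep only the most advanced annotation stage for each text:
                    # .mARkdown supersedes .completed, which supersedes .inProgress.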
for f in files:
no_ext_file = re.split("\.(mARkdown|inProgress|completed)", f)[0]
no_ext_path = os.path.join(root, no_ext_file)
if f.endswith(".mARkdown"):
if os.path.exists(no_ext_path + ".completed"):
# try:
os.remove(no_ext_path + ".completed")
# except OSError:
# pass
if os.path.exists(no_ext_path + ".inProgress"):
# try:
os.remove(no_ext_path + ".inProgress")
# except OSError:
# pass
elif f.endswith(".completed"):
# try:
if os.path.exists(no_ext_path + ".inProgress"):
os.remove(no_ext_path + ".inProgress")
# except OSError:
# pass
elif f.endswith(".inProgress"):
# try:
os.remove(os.path.join(root, f))
# except OSError:
# pass
else:
print("%s repository doesn't have any 'data' directory!" % ad)
if __name__ == '__main__':
source = input("Enter the source directory: ")
target = input("Enter the target directory: ")
if len(sys.argv) > 0:
if not os.path.exists(source):
print("source directory doesn't exists. Re-run the script and give the source!")
else:
copy_ah_data(source, target)
else:
print("Give the path to the script...!")
| UTF-8 | Python | false | false | 3,331 | py | 84 | collect_openITI_version.py | 59 | 0.47193 | 0.469228 | 0 | 71 | 45.901408 | 109 |
dagamargit/ejemplos-tkinter | 5,841,155,526,624 | 5635fec7a5380de3b7508475649f392c0606f4e6 | 97738198d0eda1baa0e0f8fa961564bc72112723 | /ej20_canvas_id_tags.py | adaa44945af83edb21903c8001b7ecf20f3b774a | []
| no_license | https://github.com/dagamargit/ejemplos-tkinter | 2eaf4b56097d1c4b2e37080393df2907e9620cb8 | 64cb99b8df3ad32c1bfd0ce81bae8496edc66415 | refs/heads/master | 2021-07-17T05:24:22.434180 | 2021-02-20T17:28:02 | 2021-02-20T17:28:02 | 239,290,374 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter as tk
from tkinter import ttk
class Aplicacion:
def __init__(self):
self.ventana1=tk.Tk()
self.crear_botones()
self.canvas1=tk.Canvas(self.ventana1, width=600, height=400, background="black")
self.canvas1.grid(column=0, row=1)
self.linea=self.canvas1.create_line(0, 0, 100,50, fill="white")
self.rectangulo=self.canvas1.create_rectangle(150,10, 300,110, fill="white")
self.ovalo=self.canvas1.create_oval(400,10,500,150, fill="red")
self.canvas1.create_rectangle(100,300,150,350,fill="#aaaaaa", tag="cuadrado")
self.canvas1.create_rectangle(200,300,250,350,fill="#555555", tag="cuadrado")
self.canvas1.create_rectangle(300,300,350,350,fill="#cccccc", tag="cuadrado")
self.ventana1.mainloop()
def crear_botones(self):
self.labelframe1=ttk.LabelFrame(self.ventana1,text="opciones")
self.labelframe1.grid(column=0, row=0, sticky="w", padx=5, pady=5)
self.boton1=ttk.Button(self.labelframe1, text="borrar linea", command=self.borrar_linea)
self.boton1.grid(column=0, row=0, padx=5)
self.boton2=ttk.Button(self.labelframe1, text="borrar rectángulo", command=self.borrar_rectangulo)
self.boton2.grid(column=1, row=0, padx=5)
self.boton3=ttk.Button(self.labelframe1, text="borrar óvalo", command=self.borrar_ovalo)
self.boton3.grid(column=2, row=0, padx=5)
self.boton4=ttk.Button(self.labelframe1, text="borrar todos los cuadrados", command=self.borrar_cuadrados)
self.boton4.grid(column=3, row=0, padx=5)
self.boton5=ttk.Button(self.labelframe1, text="borrar todos", command=self.borrar_todos)
self.boton5.grid(column=4, row=0, padx=5)
def borrar_linea(self):
self.canvas1.delete(self.linea)
def borrar_rectangulo(self):
self.canvas1.delete(self.rectangulo)
def borrar_ovalo(self):
self.canvas1.delete(self.ovalo)
def borrar_cuadrados(self):
self.canvas1.delete("cuadrado")
def borrar_todos(self):
self.canvas1.delete(tk.ALL)
aplicacion1=Aplicacion() | UTF-8 | Python | false | false | 2,143 | py | 50 | ej20_canvas_id_tags.py | 49 | 0.66978 | 0.60766 | 0 | 47 | 44.574468 | 114 |
sidyvan/autoescola | 12,919,261,669,247 | 345fcfb40b0eb094417799f99ddbbc85cb10e669 | f7e076242848e48285670c248f4106a3348a1ca9 | /autoescolaweb/cliente/migrations/0002_auto_20170224_1734.py | 720c8966aceedbfe24731720923b25fd56a25c2b | []
| no_license | https://github.com/sidyvan/autoescola | 181e3a2b49a99004f4351490651354a26a9af671 | f5370a2a6a3b1ab6ab07a5a617f1985f300c02c0 | refs/heads/master | 2017-08-03T20:07:23.948264 | 2017-03-11T16:40:08 | 2017-03-11T16:40:08 | 81,997,520 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-24 20:34
from __future__ import unicode_literals
import cliente.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cliente', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='autoescola',
name='logo',
field=models.ImageField(blank=True, default='', null=True, upload_to=cliente.models.upload_location),
),
migrations.AddField(
model_name='autoescolauser',
name='foto',
field=models.ImageField(blank=True, default='', null=True, upload_to=cliente.models.upload_location),
),
]
| UTF-8 | Python | false | false | 745 | py | 20 | 0002_auto_20170224_1734.py | 11 | 0.612081 | 0.583893 | 0 | 26 | 27.653846 | 113 |
mabuix/stock_ver2 | 6,992,206,769,478 | cffb9d2e71759a9d0793b85ab2da39f644bb2a52 | 4d6b32ecc1e9d10de309e56c3c896baafb66bed7 | /stock_app/stock_crawl/stock_crawl/spiders/fundamentals_crawl.py | 5633a3efb6f32ab83883ed517f4e52dd4e10653c | []
| no_license | https://github.com/mabuix/stock_ver2 | 4a6a45e740ade05ad86fc92a5af65543e9e1999e | 32daf879b10fea82c5a8c710afa870a5af3a5ee0 | refs/heads/master | 2020-04-07T11:23:40.781615 | 2018-03-07T08:59:24 | 2018-03-07T08:59:24 | 124,207,025 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
import os
# Import the Fundamentals item class.
from stock_crawl.items import Fundamentals
class FundamentalsCrawlSpider(CrawlSpider):
    # Name of the spider.
name = 'fundamentals_crawl'
    # List of domains allowed to be crawled.
allowed_domains = ['stocks.finance.yahoo.co.jp']
code_file_path = '../../code.txt'
    # Check that the stock-code file exists.
is_file_exist = os.path.exists(code_file_path)
if not is_file_exist:
raise FileNotFoundError('証券コードの読み込みファイルがありません。')
url = 'https://stocks.finance.yahoo.co.jp/stocks/detail/?code='
    # List of URLs to start crawling from.
start_urls = []
    # Read the target stock codes from the file and build the list of stock detail page URLs.
with open(code_file_path, 'r') as codes:
for code in codes:
start_urls.append(url + code.rstrip())
def parse(self, response):
"""
        Extract fundamental information from the stock detail page.
"""
        # Create a Fundamentals item object.
item = Fundamentals()
code = response.css('#stockinf > div.stocksDtl.clearFix > div.forAddPortfolio > dl > dt::text').extract_first()
item['code'] = int(code)
        # Extract the title text from the start up to the first "【".
item['name'] = response.css('title::text').re_first('^[^【]*')
market_capitalization = response.css('#rfindex > div.chartFinance > div:nth-child(1) > dl > dd > strong::text').extract_first()
item['market_capitalization'] = int(market_capitalization.replace(',', ''))
outstanding_shares = response.css('#rfindex > div.chartFinance > div:nth-child(2) > dl > dd > strong::text').extract_first()
item['outstanding_shares'] = int(outstanding_shares.replace(',', ''))
        yield item  # Yield the item so its data is collected by the pipeline.
lmeribal/portfolio | 14,740,327,762,669 | 57bb63c9a628b92098c648050ad9dd5d4e84e44a | e992b23dbf791e2b1fa562dc5b5cf678e54419cd | /infopole/teensComments/Татарстан/Облако тегов.py | ec61374d94d68417b6694ab3e6559b9178694eb2 | []
| no_license | https://github.com/lmeribal/portfolio | 4815ceaf508dd1cc0597f42403c92fcbc4c5f991 | 1adb77d5c6a5f1a52881180fe7fdae9f8db250b6 | refs/heads/master | 2022-03-21T16:45:03.301439 | 2019-10-28T21:17:54 | 2019-10-28T21:17:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import re
from wordcloud import WordCloud, STOPWORDS
import pandas as pd
text = []
f = open('post.txt','r')
for line in f:
    # NOTE: str.replace() returns a new string; the original calls here discarded the
    # result, so these substitutions never took effect. Assign the result back so they do.
    for word in ('на ', 'ты ', 'как ', 'анон', 'Анон', 'админ', 'если', 'ты ',
                 'он ', 'из ', 'что ', 'мне ', 'бы ', 'вы ', 'то ', 'все ',
                 'еще ', 'че ', 'На ', 'ну ', 'не ', 'Не ', 'ask', 'кто ',
                 'как ', 'но ', 'Ребят ', 'от ', 'Aнон ', 'Анoн', 'на ', 'На',
                 'ты ', 'Ты', 'Так ', 'для ', 'Не анон', 'Анонимно', 'уже '):
        line = line.replace(word, '')
line = re.sub(r'анон','',line)
line = re.sub(r'Анон','',line)
line = re.sub(r'АНОН','',line)
line = re.sub(r'не','',line)
line = re.sub(r'на','',line)
line = re.sub(r'что','',line)
line = re.sub(r'ты','',line)
line = re.sub(r'из ','',line)
line = re.sub(r'как ','',line)
line = re.sub(r'кто','',line)
line = re.sub(r'это','',line)
line = re.sub(r'то ','',line)
line = re.sub(r'вы ','',line)
line = re.sub(r'есть ','',line)
line = re.sub(r'не ','',line)
line = re.sub(r'так ','',line)
line = re.sub(r'все ','',line)
line = re.sub(r'по ','',line)
line = re.sub(r'он ','',line)
line = re.sub(r'Не ','',line)
line = re.sub(r'когда ','',line)
line = re.sub(r'если ','',line)
line = re.sub(r'ask','',line)
line = re.sub(r'имно ','',line)
line = re.sub(r'но ','',line)
line = re.sub(r'за ','',line)
line = re.sub(r'Как','',line)
line = re.sub(r'ну ','',line)
line = re.sub(r'vk','',line)
line = re.sub(r'очень ','',line)
line = re.sub(r'го ','',line)
line = re.sub(r'тебя ','',line)
line = re.sub(r'школе ','',line)
line = re.sub(r'или ','',line)
line = re.sub(r'же ','',line)
line = re.sub(r'меня ','',line)
line = re.sub(r'вас ','',line)
line = re.sub(r'вам ','',line)
line = re.sub(r'че ','',line)
line = re.sub(r'бы ','',line)
line = re.sub(r'до ','',line)
line = re.sub(r'Ну ','',line)
line = re.sub(r'еще ','',line)
line = re.sub(r'тебя ','',line)
line = re.sub(r'от ','',line)
line = re.sub(r'про ','',line)
line = re.sub(r'мы ','',line)
line = re.sub(r'Ребят ','',line)
line = re.sub(r'Ну ','',line)
line = re.sub(r'имно','',line)
line = re.sub(r'Админ ','',line)
line = re.sub(r'Когда ','',line)
line = re.sub(r'только ','',line)
line = re.sub(r'для ','',line)
line = re.sub(r'почему ','',line)
line = re.sub(r'да ','',line)
line = re.sub(r'всем ','',line)
line = re.sub(r'всех ','',line)
line = re.sub(r'школы ','',line)
line = re.sub(r'ни ','',line)
line = re.sub(r'где ','',line)
line = re.sub(r'так ','',line)
line = re.sub(r'Если ','',line)
line = re.sub(r'Есть ','',line)
line = re.sub(r'Давайте ','',line)
line = re.sub(r'класс ','',line)
line = re.sub(r'Арсен ','',line)
line = re.sub(r'пожалуйста ','',line)
line = re.sub(r'хочу ','',line)
line = re.sub(r'Все ','',line)
line = re.sub(r'нравится ','',line)
line = re.sub(r'шк ','',line)
line = re.sub(r'нибудь ','',line)
line = re.sub(r'будет ','',line)
line = re.sub(r'Диа ','',line)
line = re.sub(r'http','',line)
line = re.sub(r'com','',line)
line = re.sub(r'себя ','',line)
line = re.sub(r'тебе ','',line)
line = re.sub(r'ли ','',line)
line = re.sub(r'Вы ','',line)
line = re.sub(r'Нравится ','',line)
line = re.sub(r'сегодня ','',line)
line = re.sub(r'Так ','',line)
line = re.sub(r'пишите ','',line)
line = re.sub(r'кому ','',line)
line = re.sub(r'лс ','',line)
line = re.sub(r'Пожалуйста ','',line)
line = re.sub(r'шей ','',line)
line = re.sub(r'Почему ','',line)
line = re.sub(r'Такой ','',line)
line = re.sub(r'ребят ','',line)
line = re.sub(r'может ','',line)
line = re.sub(r'было ','',line)
line = re.sub(r'такой ','',line)
line = re.sub(r'На ','',line)
line = re.sub(r'тут ','',line)
line = re.sub(r'ШК ','',line)
line = re.sub(r'Ты ','',line)
line = re.sub(r'Го ','',line)
line = re.sub(r'админ','',line)
line = re.sub(r'Ребята ','',line)
line = re.sub(r'вообще ','',line)
line = re.sub(r'пожалуйста','',line)
line = re.sub(r'ребят','',line)
line = re.sub(r'класса ','',line)
line = re.sub(r'им ','',line)
line = re.sub(r'люди ','',line)
line = re.sub(r'которые ','',line)
line = re.sub(r'был ','',line)
line = re.sub(r'школу ','',line)
line = re.sub(r'школа ','',line)
line = re.sub(r'сейчас ','',line)
line = re.sub(r'лс','',line)
line = re.sub(r'то ','',line)
line = re.sub(r'ее ','',line)
line = re.sub(r'писать ','',line)
line = re.sub(r'классов ','',line)
line = re.sub(r'девушка','',line)
line = re.sub(r'привет ','',line)
line = re.sub(r'Он ','',line)
line = re.sub(r'школы ','',line)
line = re.sub(r'их ','',line)
line = re.sub(r'Да ','',line)
line = re.sub(r'шей ','',line)
line = re.sub(r'бесит ','',line)
line = re.sub(r'Хуснутдинова ','',line)
line = re.sub(r'Закирова ','',line)
line = re.sub(r'школе','',line)
line = re.sub(r'школы','',line)
line = re.sub(r'группу ','',line)
line = re.sub(r'то ','',line)
line = re.sub(r'ТО ','',line)
line = re.sub(r'самый ','',line)
line = re.sub(r'такие ','',line)
line = re.sub(r'без ','',line)
line = re.sub(r'очень ','',line)
line = re.sub(r'завтра ','',line)
line = re.sub(r'вроде ','',line)
line = re.sub(r'там ','',line)
line = re.sub(r'Че ','',line)
line = re.sub(r'день ','',line)
line = re.sub(r'зачем ','',line)
line = re.sub(r'Мерикова ','',line)
line = re.sub(r'такая ','',line)
line = re.sub(r'спасибо ','',line)
line = re.sub(r'всё ','',line)
line = re.sub(r'чем ','',line)
line = re.sub(r'раз ','',line)
line = re.sub(r'со ','',line)
line = re.sub(r'её ','',line)
line = re.sub(r'зю ','',line)
line = re.sub(r'была ','',line)
line = re.sub(r'буду ','',line)
line = re.sub(r'быть ','',line)
line = re.sub(r'хотя ','',line)
line = re.sub(r'да ','',line)
line = re.sub(r'Артур ','',line)
line = re.sub(r'Но ','',line)
line = re.sub(r'классы ','',line)
line = re.sub(r'девочек ','',line)
line = re.sub(r'каждый ','',line)
line = re.sub(r'удачи ','',line)
line = re.sub(r'то','',line)
line = re.sub(r'вас ','',line)
line = re.sub(r'девочка ','',line)
line = re.sub(r'пост ','',line)
line = re.sub(r'теперь ','',line)
line = re.sub(r'какой ','',line)
line = re.sub(r'Жора ','',line)
line = re.sub(r'класс ','',line)
text.append(line)
text = "".join(text)
wordcloud = WordCloud(font_path='/Library/Fonts/Verdana.ttf',
relative_scaling = 1.0,
stopwords = {'to', 'of'} # set or space-separated string
).generate(text)
plt.imshow(wordcloud)
plt.axis("off")
plt.show() | UTF-8 | Python | false | false | 8,481 | py | 69 | Облако тегов.py | 25 | 0.50864 | 0.50838 | 0 | 234 | 31.897436 | 78 |
krishchow/CtCi | 4,200,478,035,378 | 0a09a98aed433f49745cb2030e5220db4a27eef3 | 171a946e13cdbc08cfb9925449b85d2bea9ffa5b | /continSum.py | c05daea228b373f6df16efe1333335b1dd11013d | []
| no_license | https://github.com/krishchow/CtCi | cf48b5153afda67d5b3216163188f0d13e00c6ca | 139085f467d32804fbc623dc64b0bee9927eeac7 | refs/heads/master | 2020-07-10T20:05:56.613454 | 2019-09-24T05:49:32 | 2019-09-24T05:49:32 | 204,357,953 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def continousSum(array: list):
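    # Kadane's algorithm: 'current' is the best sum of a subarray ending at the current element,
    # 'maximum' is the best sum seen anywhere so far.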
    current, maximum = float('-inf'), float('-inf')
for i in array:
current = max(i, current+i)
maximum = max(maximum, current)
return maximum
assert continousSum([-2, -3, 4, -1, -2, 1, 5, -3]) == 7 | UTF-8 | Python | false | false | 250 | py | 12 | continSum.py | 11 | 0.592 | 0.556 | 0 | 8 | 30.375 | 55 |
enolfc/os-cloud-ur | 19,061,064,879,295 | b63ecc179866ab735680c7f298ac3c353e52a23c | 4d3f79abd81128155b47041b58bc61155141031c | /ceilometer2ur/__init__.py | 21d267bcdbd4bfd9dfd2097ea02b40d9701b6099 | []
| no_license | https://github.com/enolfc/os-cloud-ur | 192288704bb1ca49052a7adaad30f07022535bd6 | 68b8de95454b083cc86e23d441a88c0ba10f81bd | refs/heads/master | 2016-03-06T02:59:36.255348 | 2014-01-21T12:33:47 | 2014-01-21T12:33:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2014 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
## Fields in the Cloud Accounting Usage Record
UR_FIELDS = ['VMUUID', 'SiteName', 'MachineName', 'LocalUserId',
'LocalGroupId', 'GlobalUserName', 'FQAN', 'Status',
'StartTime', 'EndTime', 'SuspendDuration', 'WallDuration',
'CpuDuration', 'CpuCount', 'NetworkType', 'NetworkInbound',
'NetworkOutbound', 'Memory', 'Disk', 'StorageRecordId',
'ImageId', 'CloudType'
]
| UTF-8 | Python | false | false | 1,036 | py | 7 | __init__.py | 6 | 0.694015 | 0.686293 | 0 | 22 | 46.090909 | 75 |
explosion/spaCy | 128,849,065,873 | c916b3e2b5e980f926fb2cd0e6bbf00aa952145d | 55f6a9b8f90ae308a90739fd8f77f4e7cd10ff19 | /spacy/lang/fa/examples.py | 9c6fb0345e3251d690f234c5e68b39275da27d40 | [
"MIT"
]
| permissive | https://github.com/explosion/spaCy | cce07ee403aa398de7ba8941a2c11d22aea68021 | 3e4264899c3b12f8eabc5cd700146177a34824d0 | refs/heads/master | 2023-08-31T07:18:13.598768 | 2023-08-30T09:58:14 | 2023-08-30T09:58:14 | 21,467,110 | 26,348 | 4,983 | MIT | false | 2023-09-13T17:56:22 | 2014-07-03T15:15:40 | 2023-09-13T17:12:06 | 2023-09-13T17:56:22 | 201,927 | 27,077 | 4,242 | 92 | Python | false | false | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.fa.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"این یک جمله نمونه می باشد.",
"قرار ما، امروز ساعت ۲:۳۰ بعدازظهر هست!",
"دیروز علی به من ۲۰۰۰.۱﷼ پول نقد داد.",
"چطور میتوان از تهران به کاشان رفت؟",
"حدود ۸۰٪ هوا از نیتروژن تشکیل شده است.",
]
| UTF-8 | Python | false | false | 515 | py | 1,102 | examples.py | 784 | 0.649867 | 0.623342 | 0 | 15 | 24.133333 | 56 |
litex-hub/fpga_101 | 7,060,926,281,947 | 8a2d4e2c8f8572ee5ed1f6b7daa1ea03988eb606 | badff67fdb081297cec78d8bb3383d19c3a0c439 | /lab001/solutions/s004.py | f0f9e0476fbcd91db03e75ae5fdfa88e5ade7cc7 | [
"BSD-2-Clause"
]
| permissive | https://github.com/litex-hub/fpga_101 | 67635edb6a8b84e2bbf3f0972130e06981f5ea50 | 1e9c8d270e5c9d91c601b6a68a5466941340b964 | refs/heads/master | 2022-12-12T20:27:22.612261 | 2022-12-02T07:45:41 | 2022-12-02T07:45:41 | 128,534,747 | 285 | 50 | BSD-2-Clause | false | 2022-03-28T14:33:53 | 2018-04-07T14:13:01 | 2022-03-28T11:05:07 | 2022-03-28T14:33:51 | 3,669 | 246 | 37 | 2 | Python | false | false | #!/usr/bin/env python3
from migen import *
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform
# IOs ----------------------------------------------------------------------------------------------
_io = [
("user_led", 0, Pins("H17"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("K15"), IOStandard("LVCMOS33")),
("user_led", 2, Pins("J13"), IOStandard("LVCMOS33")),
("user_led", 3, Pins("N14"), IOStandard("LVCMOS33")),
("user_led", 4, Pins("R18"), IOStandard("LVCMOS33")),
("user_led", 5, Pins("V17"), IOStandard("LVCMOS33")),
("user_led", 6, Pins("U17"), IOStandard("LVCMOS33")),
("user_led", 7, Pins("U16"), IOStandard("LVCMOS33")),
("user_led", 8, Pins("V16"), IOStandard("LVCMOS33")),
("user_led", 9, Pins("T15"), IOStandard("LVCMOS33")),
("user_led", 10, Pins("U14"), IOStandard("LVCMOS33")),
("user_led", 11, Pins("T16"), IOStandard("LVCMOS33")),
("user_led", 12, Pins("V15"), IOStandard("LVCMOS33")),
("user_led", 13, Pins("V14"), IOStandard("LVCMOS33")),
("user_led", 14, Pins("V12"), IOStandard("LVCMOS33")),
("user_led", 15, Pins("V11"), IOStandard("LVCMOS33")),
("user_sw", 0, Pins("J15"), IOStandard("LVCMOS33")),
("user_sw", 1, Pins("L16"), IOStandard("LVCMOS33")),
("user_sw", 2, Pins("M13"), IOStandard("LVCMOS33")),
("user_sw", 3, Pins("R15"), IOStandard("LVCMOS33")),
("user_sw", 4, Pins("R17"), IOStandard("LVCMOS33")),
("user_sw", 5, Pins("T18"), IOStandard("LVCMOS33")),
("user_sw", 6, Pins("U18"), IOStandard("LVCMOS33")),
("user_sw", 7, Pins("R13"), IOStandard("LVCMOS33")),
("user_sw", 8, Pins("T8"), IOStandard("LVCMOS33")),
("user_sw", 9, Pins("U8"), IOStandard("LVCMOS33")),
("user_sw", 10, Pins("R16"), IOStandard("LVCMOS33")),
("user_sw", 11, Pins("T13"), IOStandard("LVCMOS33")),
("user_sw", 12, Pins("H6"), IOStandard("LVCMOS33")),
("user_sw", 13, Pins("U12"), IOStandard("LVCMOS33")),
("user_sw", 14, Pins("U11"), IOStandard("LVCMOS33")),
("user_sw", 15, Pins("V10"), IOStandard("LVCMOS33")),
("user_btn", 0, Pins("N17"), IOStandard("LVCMOS33")),
("clk100", 0, Pins("E3"), IOStandard("LVCMOS33")),
("cpu_reset", 0, Pins("C12"), IOStandard("LVCMOS33")),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk100"
default_clk_period = 10.0
def __init__(self):
XilinxPlatform.__init__(self, "xc7a100t-CSG324-1", _io, toolchain="vivado")
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
# Design -------------------------------------------------------------------------------------------
# Create our platform (fpga interface)
platform = Platform()
# Create our module (fpga description)
class Switches(Module):
def __init__(self, platform):
# synchronous assignments
self.sync += []
        # combinatorial assignments
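        # LEDs 0-7 show the inverted state of switches 0-7; LEDs 8-15 mirror switches 8-15 directly.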
for i in range(0, 8):
led = platform.request("user_led", i)
sw = platform.request("user_sw", i)
self.comb += led.eq(~sw)
for i in range(8, 16):
led = platform.request("user_led", i)
sw = platform.request("user_sw", i)
self.comb += led.eq(sw)
module = Switches(platform)
# Build --------------------------------------------------------------------------------------------
platform.build(module)
| UTF-8 | Python | false | false | 3,535 | py | 31 | s004.py | 25 | 0.519378 | 0.461103 | 0 | 88 | 39.159091 | 100 |
zzq5271137/learn_python | 4,509,715,668,538 | 992c46e091ea4de4bca88e9f86e1b3e204a32b4e | b01b437d3a80251aa3e36edd67b8ecdb90ec0e28 | /15-虚拟环境/02-virtualenvwrapper_intro.py | 6bb56bcf64eb5a41f14fb9afd5ffd011be4333c2 | []
| no_license | https://github.com/zzq5271137/learn_python | e8ddce04c7022542bad25d8d18d4863246fe7842 | 2ebca15948568e761d46b5152e8de3fa82720dba | refs/heads/master | 2023-04-23T09:14:38.853949 | 2021-05-02T17:29:11 | 2021-05-02T17:29:11 | 271,830,802 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Introduction to virtualenvwrapper
"""
"""
virtualenvwrapper makes managing virtual environments simple: you no longer need to cd into a
particular directory to create a virtual environment with virtualenv, nor go into an environment's
own directory just to activate it.
Installing virtualenvwrapper:
    1. Linux:
        pip install virtualenvwrapper
    2. Windows:
        pip install virtualenvwrapper-win
Basic usage of virtualenvwrapper:
    1. Create a virtual environment:
        Run the command "mkvirtualenv <environment name>";
        After running it, an "Envs" folder is created under your current user directory (C:/Users/<username>)
        if it does not already exist, and the virtual environment is installed into that folder;
        If, in your machine's environment variables, the Python3/Scripts search path comes before
        Python2/Scripts, Python3 is used as the interpreter of the new environment; if Python2/Scripts
        comes before Python3/Scripts, Python2 is used instead (this assumes both Python2 and Python3
        are installed on the machine and both have virtualenvwrapper installed);
    2. Enter a virtual environment:
        Run the command "workon <environment name>";
        It looks for an environment of that name under "C:/Users/<username>/Envs" (this works no matter
        which directory the console is currently in);
        Running plain "workon" lists every virtual environment installed under "C:/Users/<username>/Envs";
        (PowerShell and Cmder do not seem to handle this well; run the command from CMD)
    3. Exit a virtual environment:
        Whichever environment you are in and whichever directory the console is in, simply run the
        "deactivate" command to leave the current virtual environment;
        This "deactivate" command is not resolved from the environment's own Scripts directory but from
        the Python3/Scripts directory configured in your system environment variables, so it works from
        any directory;
    4. Delete a virtual environment:
        rmvirtualenv <environment name>
    5. List all virtual environments:
        lsvirtualenv
    6. Go to the directory of the current Python environment:
        Run the "cdvirtualenv" command;
        It cds into the directory of the current Python environment, whether that is the system-level
        Python environment or a virtual environment (no matter whether the environment lives under
        C:/Users/<username>/Envs or somewhere else you created it);
    7. Change the default path used by mkvirtualenv:
        In your environment variable settings, add a variable named WORKON_HOME and set it to the path you want;
    8. Specify the Python interpreter version when creating a virtual environment:
        mkvirtualenv --python==<absolute path to the Python interpreter> <environment name>
"""
| UTF-8 | Python | false | false | 2,649 | py | 174 | 02-virtualenvwrapper_intro.py | 169 | 0.79418 | 0.780696 | 0 | 45 | 30.311111 | 73 |
jperkelens/pants | 12,807,592,510,630 | 1e496e72bcfacdd7a43f7dff2f8aeab2a944bdad | f6d08f2ac89f0dae24c39d9c827484cfb8aefcea | /tests/python/pants_test/logging/native_engine_logging_integration_test.py | 7ab1487800ba906f3668368810130c589601b37b | [
"Apache-2.0"
]
| permissive | https://github.com/jperkelens/pants | 51f26f395a645655e9bdf89b666dee61823a1ba7 | b7ad997b5ef9175cc5e22e36574d8590bc8da120 | refs/heads/master | 2020-12-23T02:44:53.078141 | 2020-08-22T15:55:07 | 2020-08-22T15:55:07 | 237,007,066 | 0 | 0 | Apache-2.0 | true | 2020-01-29T14:51:12 | 2020-01-29T14:51:11 | 2020-01-29T07:52:26 | 2020-01-29T07:52:23 | 100,831 | 0 | 0 | 0 | null | false | false | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.pants_integration_test import PantsIntegrationTest, read_pantsd_log
from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase
class NativeEngineLoggingTest(PantsIntegrationTest):
@classmethod
def use_pantsd_env_var(cls):
"""Some of the tests here expect to read the standard error after an intentional failure.
However, when pantsd is enabled, these errors are logged to logs/exceptions.<pid>.log So
stderr appears empty. (see #7320)
"""
return False
def test_native_logging(self) -> None:
expected_msg = r"\[DEBUG\] Launching \d+ root"
pants_run = self.run_pants(["-linfo", "list", "3rdparty::"])
self.assertNotRegex(pants_run.stderr, expected_msg)
pants_run = self.run_pants(["-ldebug", "list", "3rdparty::"])
self.assertRegex(pants_run.stderr, expected_msg)
class PantsdNativeLoggingTest(PantsDaemonIntegrationTestBase):
def test_pantsd_file_logging(self) -> None:
with self.pantsd_successful_run_context("debug") as ctx:
daemon_run = ctx.runner(["list", "3rdparty::"])
ctx.checker.assert_started()
assert "[DEBUG] connecting to pantsd on port" in daemon_run.stderr_data
pantsd_log = "\n".join(read_pantsd_log(ctx.workdir))
assert "[DEBUG] logging initialized" in pantsd_log
| UTF-8 | Python | false | false | 1,534 | py | 36 | native_engine_logging_integration_test.py | 33 | 0.685789 | 0.677314 | 0 | 35 | 42.828571 | 97 |
Baobao211195/python-tutorial | 944,892,805,692 | 5a2dfd29b7e292602614c1124d265adad324c4ef | 53a7643811aa67c201821c41dcfae24a738e3709 | /Clazz/compare_static_method_class_method.py | 246075c154c8e7411b3b3057c293defc906b2640 | []
| no_license | https://github.com/Baobao211195/python-tutorial | 50f39cc4c68c22cbb3cea011a37277785911fa10 | 1a15bbdbaafe5c01d773a24241b67a5f4a454ceb | refs/heads/master | 2023-03-15T17:22:13.258356 | 2020-01-05T14:47:53 | 2020-01-05T14:47:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
class A(object):
def foo(self, x):
print("executing foo(%s, %s)" % (self, x))
@classmethod
def class_foo(cls, x):
print("executing class_foo(%s, %s)" % (cls, x))
@staticmethod
def static_foo(x):
print("executing static_foo(%s)" % x)
if __name__ == '__main__':
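    # foo() needs an instance (receives self); class_foo() receives the class whether called on
    # the instance or on A itself; static_foo() receives neither.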
a = A()
print("initial object")
a.foo(1)
print("salary_fsoft" > "salay_cmc")
print("calling class method by instance")
a.class_foo(1)
# print("calling class method by Class of object")
# A.class_foo(1)
#
print("calling static method by instance")
a.static_foo(1)
#
# print("calling static method by Class")
# A.static_foo(1)
#
# print("========================================")
# print(a.foo(1))
| UTF-8 | Python | false | false | 805 | py | 166 | compare_static_method_class_method.py | 150 | 0.513043 | 0.504348 | 0 | 36 | 21.361111 | 55 |
biocore/empress | 2,276,332,669,204 | 62f952fa742167ad721ac5e33e4c4f88c903c2e5 | c91e506d4a251dc9c9354804284f4b540d942c70 | /empress/_plot_utils.py | 57ed8c5d4ac0a46bfe930fab27391602000c691b | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0",
"BSD-3-Clause"
]
| permissive | https://github.com/biocore/empress | 76d3cef31683457a4a6a79f76e83d37b3dc2b0cc | cc3975a71c931f2d356856fb5ee9669b3f83f150 | refs/heads/master | 2022-06-29T00:30:28.364512 | 2022-05-23T14:38:45 | 2022-05-23T14:38:45 | 111,137,123 | 48 | 31 | BSD-3-Clause | false | 2022-08-22T23:00:54 | 2017-11-17T18:25:37 | 2022-05-23T14:48:50 | 2022-05-31T01:56:17 | 48,032 | 33 | 29 | 144 | JavaScript | false | false | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, empress development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pkg_resources
from bp import parse_newick
import numpy as np
import pandas as pd
from scipy.spatial.distance import euclidean
SUPPORT_FILES = pkg_resources.resource_filename('empress', 'support_files')
TEMPLATES = os.path.join(SUPPORT_FILES, 'templates')
def get_bp(newickfmt):
"""Loads a bp.BP tree from a QIIME 2 NewickFormat object.
This function, along with save_viz(), was moved here from _plot.py so it
could be reused between different Empress commands.
Parameters
----------
newickfmt : q2_types.tree.NewickFormat
Returns
-------
bp.BP
"""
with open(str(newickfmt)) as treefile:
# The file will still be closed even though we return from within the
# with block: see https://stackoverflow.com/a/9885287/10730311.
return parse_newick(treefile.readline())
def save_viz(viz, output_dir, q2=True):
"""Saves an Empress visualization to a filepath.
Parameters
----------
viz : empress.Empress
output_dir : str
q2 : bool
"""
with open(os.path.join(output_dir, 'empress.html'), 'w') as htmlfile:
htmlfile.write(str(viz))
viz.copy_support_files(output_dir)
if q2:
import q2templates
index = os.path.join(TEMPLATES, 'index.html')
q2templates.render(index, output_dir)
def prepare_pcoa(pcoa, number_of_features):
"""Selects top N biplot features by magnitude (coped from q2-emperor).
Parameters
----------
pcoa : skbio.stats.ordination.OrdinationResults
number_of_features : int
Returns
-------
skbio.stats.ordination.OrdinationResults
"""
feats = pcoa.features.copy()
# in cases where the axes are all zero there might be all-NA
# columns
feats.fillna(0, inplace=True)
origin = np.zeros_like(feats.columns)
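    # A feature's importance is the Euclidean distance of its loadings from the origin;
    # only the largest ones are kept below.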
feats['importance'] = feats.apply(euclidean, axis=1, args=(origin,))
feats.sort_values('importance', inplace=True, ascending=False)
feats.drop(['importance'], inplace=True, axis=1)
pcoa.features = feats[:number_of_features].copy()
return pcoa
def check_and_process_files(output_dir, tree_file, feature_metadata):
"""Initial checks and processing of files for standalone CLI plotting.
Parameters
----------
output_dir : str
tree_file : str
fm_file : str
Returns
-------
bp.Tree
pd.DataFrame
"""
if os.path.isdir(output_dir):
raise OSError("Output directory already exists!")
with open(str(tree_file), "r") as f:
tree_newick = parse_newick(f.readline())
if feature_metadata is not None:
feature_metadata = pd.read_csv(feature_metadata, sep="\t", index_col=0)
return tree_newick, feature_metadata
| UTF-8 | Python | false | false | 3,088 | py | 74 | _plot_utils.py | 59 | 0.629858 | 0.618523 | 0 | 107 | 27.859813 | 79 |
elitumgroup/erp-maeq-v11 | 14,413,910,295,012 | 0ffb38ccfddba485872104798ff0cf1161ee1afb | 71ca130e51f181aae025e15efd004c0f89237ef2 | /eliterp_purchases/__manifest__.py | 795329a72cd53c8fdf7a5a1e77e01b7d822eaa4b | []
| no_license | https://github.com/elitumgroup/erp-maeq-v11 | cf0e2f91926b4a3350802b16ca018299f6f6a6b0 | 8e0243818968a63d8212c08f75ce431bcf33346c | refs/heads/master | 2021-06-04T19:23:02.576851 | 2021-01-04T15:53:48 | 2021-01-04T15:53:48 | 157,874,880 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 Elitumdevelop S.A, Ing. Mario Rangel
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
{
'name': "Módulo de Compras",
'summary': "Compras.",
'author': "Ing. Mario Rangel, Elitumgroup S.A",
'website': "http://www.elitumgroup.com",
'category': "Personalization",
'license': "LGPL-3",
'version': "1.0",
'depends': [
'base',
'eliterp_accounting',
'purchase',
'purchase_requisition',
'stock',
'product',
'contacts',
'stock_account',
],
'data': [
'data/sequences.xml',
'security/ir.model.access.csv',
'security/purchases_security.xml',
'views/in_invoice_views.xml',
'views/supplier_views.xml',
'views/purchase_requisition_views.xml',
'views/purchase_order_views.xml',
'views/menus.xml',
'views/dashboard.xml',
],
'init_xml': [],
'update_xml': [],
'installable': True,
'active': False,
}
| UTF-8 | Python | false | false | 1,039 | py | 242 | __manifest__.py | 102 | 0.550096 | 0.540462 | 0 | 38 | 26.315789 | 68 |
jinghaoxu/yimiauto | 18,854,906,447,477 | 33e1be344618e66eaa7c20fcc5cc8cdc72fff5a4 | 32c87a4358ad4dd3c10259d648199d608ff8dbd3 | /YimiDjango/view_all/page.py | 78ce3dc24feb36f719ab54b48ea5b8080282586d | []
| no_license | https://github.com/jinghaoxu/yimiauto | baef5a5686b2965704f27aaed531fee3d4920ab2 | 4a3f969ba5c30117f38b83dc7ab10d0c76de8547 | refs/heads/master | 2021-09-10T04:38:17.346926 | 2018-03-21T01:46:28 | 2018-03-21T01:54:13 | 126,105,151 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render_to_response
"""
Views that return template pages.
"""
# Home page
def index(request):
return render_to_response('index.html')
# Test suite
def suite(request):
return render_to_response('suite.html')
# Test case
def case(request):
try:
suiteid = request.GET.get('suiteid')
except:
suiteid = False
try:
modularid = request.GET.get('modularid')
except:
modularid = False
return render_to_response('case.html', {'suiteid': suiteid, 'modularid': modularid})
# Test case report
def case_report(request):
return render_to_response('case_report.html')
# Test suite report
def suite_report(request):
return render_to_response('suite_report.html')
# Module
def modular(request):
return render_to_response('modular.html')
| UTF-8 | Python | false | false | 798 | py | 46 | page.py | 37 | 0.659151 | 0.659151 | 0 | 43 | 16.534884 | 88 |
mrdutta27/Liquid-Helium-Simulations-Summer-2015 | 16,758,962,394,972 | d4e87fe07af26ca4040d0e80b0f0b3738f30b1bb | 36b0a4ee431ad6a2dfd2df046564d3ec620a18f9 | /simulation_data/Wavenumber_Z0/Old Data/bad data/color scatter.py | e59aaa11e6da7d3ea1e4263047b244d3e5475648 | []
| no_license | https://github.com/mrdutta27/Liquid-Helium-Simulations-Summer-2015 | a1db6be533936fd0d8f0a0e2ea05bd9919d5f6ac | 78af9633a9f719c91f3f571bb28093a0f25f4cab | refs/heads/master | 2021-01-10T06:47:57.499277 | 2015-08-13T21:31:24 | 2015-08-13T21:38:14 | 35,980,761 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import csv
import matplotlib.pyplot as plt
import pylab as p
import numpy as np
data = csv.reader(open(r'C:\Users\Suryabrata\Dropbox\Yale\Summer 2015\McKinsey Research\SimulationData\Wavenumber\Particle Sim\bolometer\all_bolometer.csv', 'rb'), delimiter=",", quotechar='|')
#Prompt Signal Rate vs All Signal Rate
x_pos, y_pos, time = [],[],[]
wavenumber=float(2.2)
depth=float(.45)
for row in data:
#if float(row[1]) == float(depth) and float(row[0])==float(wavenumber): #phonon at depth 0
if float(row[1]) == float(depth): #phonon at depth 0
x_pos.append(float(row[3]))
y_pos.append(float(row[4]))
time.append(float(row[2]))
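# Bin the selected hit positions into a 21x21 2D histogram over x, y in [-0.5, 0.5]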
hist, xedges, yedges = np.histogram2d(x_pos, y_pos, range=[[-.5,.5],[-.5,.5]], bins=21)
import plotly.plotly as py
from plotly.graph_objs import *
data = Data([
Heatmap(
z=hist
)
])
py.image.save_as({'data': data}, r"C:\Users\Suryabrata\Dropbox\Yale\Summer 2015\McKinsey Research\SimulationData\Wavenumber\Particle Sim\plots\Heatmaps\Heatmap_z.45.png")
print x_pos | UTF-8 | Python | false | false | 1,082 | py | 11,117 | color scatter.py | 15 | 0.671904 | 0.644177 | 0 | 36 | 29.083333 | 193 |
Code-Institute-Submissions/Poster-Panda | 10,574,209,531,249 | 90defe9a257d71cf354ebb0a3b96bb186de1ae46 | 3c4327089fae4e666d1ff2e7f3656a88c2f096d9 | /wishlist/views.py | 0dcd7de7fa89031d84d8f7836b0dc55fcfeebdc7 | []
| no_license | https://github.com/Code-Institute-Submissions/Poster-Panda | 42ff970ba19d55aeb62638923bdc673db590e2b2 | 773f734afd9370916b3f4875c9f404c37eb69000 | refs/heads/master | 2023-07-07T16:05:44.815209 | 2021-08-09T08:53:12 | 2021-08-09T08:53:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from products.models import Product
from profiles.models import UserProfile
from wishlist.models import Wishlist, WishlistItem
@login_required
def view_wishlist(request):
""" A view to render all wishlist items """
user = UserProfile.objects.get(user=request.user)
wishlist = Wishlist.objects.get(user=user)
wishlist_products = WishlistItem.objects.filter(wishlist=wishlist)
context = {
'wishlist_products': wishlist_products,
}
return render(request, 'wishlist/wishlist.html', context)
@login_required
def add_to_wishlist(request, product_id):
""" A view to add products to the wishlist and remove from it """
if request.method == 'GET':
# get the wishlist
user = UserProfile.objects.get(user=request.user)
wishlist = Wishlist.objects.get(user=user)
# create a new wishlist item, delete if already in the wishlist
if request.user.is_authenticated:
product = get_object_or_404(Product, pk=product_id)
item = WishlistItem.objects.filter(
product=product, wishlist=wishlist)
if item.exists():
item.delete()
messages.info(request, 'Removed from your wishlist!')
else:
item = WishlistItem.objects.create(
product=product, wishlist=wishlist)
messages.info(request, 'Added to your wishlist!')
return redirect(reverse('view_wishlist'))
| UTF-8 | Python | false | false | 1,646 | py | 23 | views.py | 7 | 0.669502 | 0.665857 | 0 | 49 | 32.591837 | 73 |
lukeprotag/PathBench | 12,498,354,866,001 | 877fed493c881625f4519622801f10713d0f3114 | 67cf4d424f84bda5a43123bf086b6be83eb52850 | /src/ros/advanced/ros.py | bd0d5243a8db0f31dfba1e0eb564de48e16cfaf3 | [
"BSD-3-Clause"
]
| permissive | https://github.com/lukeprotag/PathBench | 2811df430261b34c78c408c5fcd8d39287d9ac62 | 101e67674efdfa8e27e1cf7787dac9fdf99552fe | refs/heads/master | 2023-07-08T13:18:33.103635 | 2021-07-30T16:55:49 | 2021-07-30T16:55:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import argparse
from typing import Callable, List, Optional  # Callable/List are needed by the annotations in main()
from nptyping import NDArray
import numpy as np
import cv2 as cv
import rospy
from nav_msgs.msg import OccupancyGrid, Odometry
from geometry_msgs.msg import Twist, PoseWithCovariance
# Add PathBench/src to system path for module imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from algorithms.algorithm_manager import AlgorithmManager # noqa: E402
from algorithms.configuration.configuration import Configuration # noqa: E402
from algorithms.configuration.entities.agent import Agent # noqa: E402
from algorithms.configuration.entities.goal import Goal # noqa: E402
from algorithms.configuration.maps.dense_map import DenseMap # noqa: E402
from algorithms.configuration.maps.ros_map import RosMap # noqa: E402
from simulator.services.debug import DebugLevel # noqa: E402
from simulator.services.services import Services # noqa: E402
from simulator.simulator import Simulator # noqa: E402
from structures import Size, Point # noqa: E402
import utility.math as m # noqa: E402
from utility.misc import flatten # noqa: E402
from utility.argparse import add_configuration_flags # noqa: E402
from utility.threading import Lock # noqa: E402
class Ros:
INFLATE: int = 2 # radius of agent for extended walls.
INIT_MAP_SIZE: int = 128 # maximum map size for the first received map (this determines the scaling factor for all subsequent map updates).
MAP_SIZE: int = 256 # the overall map size, can be as big as you like. Note, the initial map fragment will be located at the center of this 'big' map.
TRAVERSABLE_THRESHOLD: int = 30 # weight grid values above this value are considered to be obstacles
_sim: Optional[Simulator] # simulator
_grid: Optional[NDArray[(MAP_SIZE, MAP_SIZE), np.float32]] # weight grid, shape (width, height), weight bounds: (0, 100), unmapped: -1
_size: Optional[Size] # size of the 'big' map (MAP_SIZE, MAP_SIZE).
_res: Optional[float] # map resolution (determined by initial map fragment).
_scale: Optional[float] # scale factor from raw map fragment to PathBench OGM grid (same factor for both x and y-axis)
_agent: Optional[PoseWithCovariance] # latest agent pose data
_agent_lock: Lock
_grid_lock: Lock
def __init__(self) -> None:
self._grid = None
self._sim = None
self._origin = None
self._size = None
self._res = None
self._scale = None
self._agent = None
self._agent_lock = Lock()
self._grid_lock = Lock()
# initialise ROS node
rospy.init_node("path_bench", log_level=rospy.INFO)
rospy.Subscriber("/map", OccupancyGrid, self._set_slam)
rospy.Subscriber('/odom', Odometry, self._update_agent)
self.pubs = {
"vel": rospy.Publisher("/cmd_vel", Twist, queue_size=10), # velocity
}
def _set_slam(self, msg: OccupancyGrid) -> None:
map_info = msg.info
grid_data = msg.data
if self._size is None: # init #
self._size = Size(self.MAP_SIZE, self.MAP_SIZE)
self._res = map_info.resolution
self._scale = self.INIT_MAP_SIZE / map_info.height
if (self.INIT_MAP_SIZE / map_info.width) < self._scale:
self._scale = self.INIT_MAP_SIZE / map_info.width
# convert raw grid data into a matrix for future processing (compression)
raw_grid = np.empty((map_info.height, map_info.width), dtype=np.float32)
for i in range(len(grid_data)):
col = i % map_info.width
row = int((i - col) / map_info.width)
raw_grid[row, col] = grid_data[i]
# compress the map to a suitable size (maintains aspect ratio)
raw_grid = cv.resize(raw_grid, None, fx=self._scale, fy=self._scale, interpolation=cv.INTER_AREA)
if self._origin is None: # init #
# set the origin to the big map origin, use the raw grid origin with
# negative grid position to retrieve the big map's origin in world coordinates
self._origin = Point(map_info.origin.position.x, map_info.origin.position.y)
init_map_size = Size(*raw_grid.shape[::-1])
map_origin_pos = Point((init_map_size.width - self._size.width) // 2, (init_map_size.height - self._size.height) // 2)
self._origin = self._grid_to_world(map_origin_pos)
# get position of big map origin in current raw map
start = self._world_to_grid(self._origin, origin=Point(map_info.origin.position.x, map_info.origin.position.y))
# take an offsetted, potentially cropped view of the current raw map
grid = np.full(self._size, -1)
for i in range(start[0], start[0] + self._size.width):
for j in range(start[1], start[1] + self._size.height):
if i >= 0 and j >= 0 and i < raw_grid.shape[1] and j < raw_grid.shape[0]:
grid[i - start[0]][j - start[1]] = raw_grid[j][i]
# hacky work-around for not having extended walls implemented. Here we manually
# extend the walls, by placing obstacles (works just as well, but should still
# ideally implement extended walls).
INVALID_VALUE = -2
grid_walls_extended = np.full(self._size, INVALID_VALUE)
for idx in np.ndindex(grid_walls_extended.shape):
if grid_walls_extended[idx] == INVALID_VALUE:
grid_walls_extended[idx] = grid[idx]
if grid[idx] > self.TRAVERSABLE_THRESHOLD:
for i in range(-self.INFLATE, self.INFLATE+1):
for j in range(-self.INFLATE, self.INFLATE+1):
grid_walls_extended[(idx[0]+i, idx[1]+j)] = grid[idx]
# make new grid accessible.
# Note, it's up to the user / algorithm to request a map update for thread
# safety, e.g. via `self._sim.services.algorithm.map.request_update()`.
self.grid = grid_walls_extended
@property
def grid(self) -> str:
return 'grid'
@grid.setter
def grid(self, value: NDArray[(MAP_SIZE, MAP_SIZE), np.float32]) -> None:
self._grid_lock.acquire()
self._grid = value
self._grid_lock.release()
@grid.getter
def grid(self) -> Optional[NDArray[(MAP_SIZE, MAP_SIZE), np.float32]]:
self._grid_lock.acquire()
grid = self._grid
self._grid_lock.release()
return grid
def _update_agent(self, msg: Odometry) -> None:
self.agent = msg.pose
@property
def agent(self) -> str:
return 'agent'
@agent.setter
def agent(self, value: PoseWithCovariance) -> None:
self._agent_lock.acquire()
self._agent = value
self._agent_lock.release()
@agent.getter
def agent(self) -> Optional[PoseWithCovariance]:
self._agent_lock.acquire()
agent = self._agent
self._agent_lock.release()
return agent
@staticmethod
def unit_vector(v):
return v / np.linalg.norm(v)
@staticmethod
def angle(v1, v2):
        # reference the sibling staticmethod through the class; a bare unit_vector() would raise NameError
        v1 = Ros.unit_vector(v1)
        v2 = Ros.unit_vector(v2)
return np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))
def _send_way_point(self, wp: Point) -> None:
"""
Sends velocity commands to get the robot to move to the given way point.
"""
goal_tresh = 0.1
angle_tresh = 0.1
sleep = 0.01
max_it = 1000
rot_multiplier = 5
forward_multiplier = 0.5
found = False
wp = np.array(self._grid_to_world(wp))
rospy.loginfo("Sending waypoint: {}".format(wp))
for _ in range(max_it):
agent_pos = np.array([self.agent.pose.position.x, self.agent.pose.position.y])
q_agent_orientation = self.agent.pose.orientation
q_agent_orientation = [q_agent_orientation.x, q_agent_orientation.y,
q_agent_orientation.z, q_agent_orientation.w]
agent_rot = m.euler_from_quaternion(q_agent_orientation, axes='sxyz')[0]
goal_dir = wp - agent_pos
goal_rot = np.arctan2(goal_dir[1], goal_dir[0])
angle_left = np.sign(goal_rot - agent_rot) * (np.abs(goal_rot - agent_rot) % np.pi)
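            # signed heading error toward the waypoint (magnitude wrapped below pi), used as a proportional control term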
dist_left = np.linalg.norm(goal_dir)
# rotate
if not np.abs(angle_left) < angle_tresh:
rot_speed = np.clip(angle_left * rot_multiplier, -1, 1)
self._send_vel_msg(rot=rot_speed)
rospy.sleep(sleep)
continue
# go forward
if not dist_left < goal_tresh:
forward_speed = np.clip(dist_left * forward_multiplier, 0, 0.5)
self._send_vel_msg(vel=forward_speed)
rospy.sleep(sleep)
continue
else:
found = True
break
# stop
self._send_vel_msg()
rospy.loginfo("Waypoint found: {}".format(found))
def _send_vel_msg(self, vel=None, rot=None) -> None:
"""
Send velocity.
"""
if not vel:
vel = 0
if not rot:
rot = 0
vel = [vel, 0, 0]
rot = [0, 0, rot]
vel_msg = Twist()
vel_msg.linear.x, vel_msg.linear.y, vel_msg.linear.z = vel
vel_msg.angular.x, vel_msg.angular.y, vel_msg.angular.z = rot
self.pubs["vel"].publish(vel_msg)
def _map_update_requested(self) -> None:
"""
Map update was requested.
"""
pass
def _world_to_grid(self, world_pos: Point, origin: Optional[Point] = None) -> Point:
"""
Converts from meters coordinates to PathBench's grid coordinates (`self.grid`).
"""
# bottom-left corner of the grid to convert to
if origin is None:
origin = self._origin
grid_pos = world_pos
grid_pos = grid_pos - origin
grid_pos = grid_pos / self._res
grid_pos = grid_pos * self._scale
grid_pos = Point(*np.rint(grid_pos.values))
return grid_pos
def _grid_to_world(self, grid_pos: Point) -> Point:
"""
Converts PathBench's grid coordinates (`self.grid`) to meters coordinates.
"""
world_pos = grid_pos
world_pos = world_pos / self._scale
world_pos = world_pos * self._res
world_pos = world_pos + self._origin
return world_pos
def _setup_sim(self, config: Optional[Configuration] = None, goal: Optional[Point] = None) -> Simulator:
"""
Sets up the simulator (e.g. algorithm and map configuration).
"""
while self.grid is None or self.agent is None:
rospy.loginfo("Waiting for grid and agent to initialise...")
rospy.sleep(0.5)
if config is None:
config = Configuration()
# general
config.simulator_graphics = True
config.simulator_key_frame_speed = 0.16
config.simulator_key_frame_skip = 20
config.get_agent_position = lambda: self._world_to_grid(Point(self.agent.pose.position.x, self.agent.pose.position.y))
config.visualiser_simulator_config = False # hide the simulator config window
# algorithm
if config.algorithm_name is None:
config.algorithm_name = "WPN-view"
config.simulator_algorithm_type, config.simulator_testing_type, config.simulator_algorithm_parameters = config.algorithms[config.algorithm_name]
# map
goal = Goal(Point(0, 0) if goal is None else goal)
agent = Agent(self._world_to_grid(Point(self.agent.pose.position.x, self.agent.pose.position.y)),
radius=self.INFLATE)
mp = RosMap(agent,
goal,
lambda: self.grid,
traversable_threshold=self.TRAVERSABLE_THRESHOLD,
unmapped_value=-1,
wp_publish=self._send_way_point,
update_requested=self._map_update_requested,
name="ROS Map")
config.maps = {mp.name: mp}
config.simulator_initial_map = list(config.maps.values())[0]
config.map_name = list(config.maps.keys())[0]
# create the simulator
s = Services(config)
s.algorithm.map.request_update()
sim = Simulator(s)
return sim
def start(self, config: Optional[Configuration] = None, goal: Optional[Point] = None) -> None:
"""
Start the simulator.
"""
rospy.loginfo("Starting simulator")
self._sim = self._setup_sim(config, goal)
self._sim.start()
def main() -> bool:
parser = argparse.ArgumentParser(prog="ros.py",
description="PathBench 2D ROS extension runner",
formatter_class=argparse.RawTextHelpFormatter)
configurers: List[Callable[[Configuration, argparse.Namespace], bool]] = []
configurers.append(add_configuration_flags(parser, visualiser_flags=True, algorithms_flags=True, multiple_algorithms_specifiable=False))
parser.add_argument("-g", "--goal", nargs=2, type=int, help="goal position \"x y\"")
args = parser.parse_args()
print("args:{}".format(args))
config = Configuration()
for c in configurers:
if not c(config, args):
return False
if args.algorithm:
config.algorithm_name = list(config.algorithms.keys())[0]
config.simulator_algorithm_type, config.simulator_testing_type, config.simulator_algorithm_parameters = config.algorithms[config.algorithm_name]
goal = Point(*args.goal) if args.goal else None
ros = Ros()
ros.start(config, goal)
return True
if __name__ == "__main__":
ret = main()
exit_code = 0 if ret else 1
sys.exit(exit_code)
| UTF-8 | Python | false | false | 13,960 | py | 20 | ros.py | 13 | 0.602722 | 0.592335 | 0 | 368 | 36.934783 | 156 |
AndreiErofeev/RaifHack_by_Crystal | 670,014,913,925 | 3dc480f758b00a73b054441e971ee033b4f3bc31 | 4108281fd9bae04ac72c7802f8d8d5724ed920d5 | /model/predict.py | 74e58dcd1a7897b51df4f7148fa4396f6722d03b | []
| no_license | https://github.com/AndreiErofeev/RaifHack_by_Crystal | feec90a26efbf1becf45945cc1c30d057b2313a1 | 16feaf13bb196ab0e03cee4b4de6cc0e28e78a18 | refs/heads/main | 2023-08-07T19:37:40.584910 | 2021-10-03T00:34:42 | 2021-10-03T00:34:42 | 409,711,320 | 0 | 0 | null | false | 2021-10-04T12:01:28 | 2021-09-23T18:59:15 | 2021-10-03T00:37:41 | 2021-10-04T12:01:27 | 294 | 0 | 0 | 1 | Python | false | false | import xgboost as xgb
import pandas as pd
import numpy as np
import pickle
from .featuretransforming import floor_cleaning, feature_transformer
def predict(data_test, modelname = 'model.pkl', outname = '../submit.csv'):
data_test = floor_cleaning(data_test)
features = ['id', 'floor', 'city', 'lat', 'lng', 'reform_count_of_houses_1000', 'reform_count_of_houses_500',
'reform_house_population_1000', 'reform_mean_year_building_1000', 'price_type'] + \
data_test.filter(regex='osm.*').columns.to_list() + ['total_square', 'realty_type', 'region']
features.remove('osm_city_nearest_name')
f_transf_t = feature_transformer(data_test, features, mode='test')
X_subm = f_transf_t.transform(
fill_features_mean=['reform_house_population_1000', 'reform_mean_year_building_1000', 'floor'],
drop_cols=['city', 'id'], cat_features=['realty_type'])
X_subm = X_subm.drop(columns=['region', 'price_type'])
with open(modelname, 'rb') as f:
models = pickle.load(f)
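    # Two-stage scheme: the first model's predictions are appended as an extra feature ('zero_preds'),
    # then the second model's log-scale output is exponentiated and rescaled by the stored 'magic_factor'.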
X_subm['zero_preds'] = models['pt_zeros_model'].predict(X_subm)
y_subm = np.exp(models['pt_ones_model'].predict(X_subm)) * models['magic_factor']
df_to_submit = pd.DataFrame(data=np.array([data_test['id'].values, y_subm]).T,
columns=['id', 'per_square_meter_price'])
df_to_submit.to_csv(outname) | UTF-8 | Python | false | false | 1,382 | py | 10 | predict.py | 8 | 0.638929 | 0.622287 | 0 | 30 | 45.1 | 113 |
davidbrouillette/KDigitalPortland-PingPong | 10,909,216,978,794 | 6d991bf44defef112e02d02bb3f008f531e018dd | 230f4e4f33c712a011f5b812815130d48131e9da | /v1/python/textObject.py | 6a04efe120891df0a9d7b28953971a502c4dc3a8 | []
| no_license | https://github.com/davidbrouillette/KDigitalPortland-PingPong | f04b091ba6f04896e475f90457271c73adff6e76 | 5501d68497ba56e095dc464945e19cbf9b798ec3 | refs/heads/master | 2020-03-26T12:18:35.974989 | 2018-08-16T20:30:43 | 2018-08-16T20:30:43 | 144,885,972 | 1 | 0 | null | false | 2018-08-16T20:30:44 | 2018-08-15T17:57:41 | 2018-08-16T20:17:32 | 2018-08-16T20:30:44 | 25 | 1 | 1 | 0 | Python | false | null | import pygame
class TextObject:
def __init__(self, id, text, x, y, fontSize=50):
self.id = id
self.pos = (x,y)
self.text = text
self.color = (255,117,73)
self.font = pygame.font.SysFont("lato", fontSize, True)
self.bounds = self.getSurface(self.text)
def draw(self, surface, centralized=False):
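        # Re-render the current text and blit it horizontally centred on self.pos.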
textSurface, self.bounds = self.getSurface(self.text)
pos = (self.pos[0] - self.bounds.width // 2, self.pos[1])
surface.blit(textSurface, pos)
def getSurface(self, text):
textSurface = self.font.render(text, False, self.color)
return textSurface, textSurface.get_rect()
def update(self):
pass
| UTF-8 | Python | false | false | 726 | py | 31 | textObject.py | 27 | 0.589532 | 0.571625 | 0 | 22 | 31.909091 | 65 |
orange-eng/internship | 7,275,674,638,480 | 298fd91c4c11b07213690053948e4442d60c37b1 | f0adf5afb93b7f0a67802e876a02e898cd92a172 | /Tencent/Filters/video_cut copy.py | fd2353be2ae4592acb231e86512b91ed688e2de3 | [
"Apache-2.0"
]
| permissive | https://github.com/orange-eng/internship | 9a2f746b3d50673038481392100d375f6eec82d3 | c8c566df453d3a4bdf692338f74916ae15792fa1 | refs/heads/main | 2023-07-18T11:46:36.659858 | 2021-08-31T09:39:10 | 2021-08-31T09:39:10 | 358,230,295 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
frameNum = 0
# Read until video is completed
while(cap.isOpened()):
# Capture frame-by-frame
ret, frame = cap.read()
frameNum += 1
if ret == True:
tempframe = frame
if(frameNum==1):
previousframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)
print(111)
if(frameNum>=2):
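            # Frame differencing: the absolute difference between consecutive grayscale frames
            # highlights motion; the 3x3 median blur below suppresses isolated noise pixels.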
currentframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)
currentframe = cv2.absdiff(currentframe,previousframe)
median = cv2.medianBlur(currentframe,3)
# img = cv2.imread("E:/chinese_ocr-master/4.png")
# img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold_frame = cv2.threshold(currentframe, 20, 255, cv2.THRESH_BINARY)
gauss_image = cv2.GaussianBlur(threshold_frame, (3, 3), 0)
print(222)
# Display the resulting frame
cv2.imshow('原图',frame)
cv2.imshow('Frame',currentframe)
cv2.imshow('median',median)
# Press Q on keyboard to exit
if cv2.waitKey(33) & 0xFF == ord('q'):
break
previousframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
| UTF-8 | Python | false | false | 1,459 | py | 172 | video_cut copy.py | 135 | 0.635739 | 0.602062 | 0 | 49 | 28.653061 | 86 |
UdeS-CoBIUS/G4Conservation | 8,727,373,590,338 | fd16382579dfee6cf7aceeb2a3cc1a19f38d047a | 9dd4d6da5bb878d6d67e6d6a632e23d566183175 | /scripts/review/pG4AnnotationCentro.py | 9fc03e08cd61524fd912b5fe5aae733faa53290a | []
| no_license | https://github.com/UdeS-CoBIUS/G4Conservation | 3b6dd072ef99cbb5b91af4ac30a8500fcc8433d0 | 386fb776d7aa4f0055bb4b6a2aeeca23948973ee | refs/heads/master | 2023-08-24T20:30:03.671649 | 2021-10-13T14:31:40 | 2021-10-13T14:31:40 | 375,058,383 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-:v
import os
import argparse
import pandas as pd
from pprint import pprint
"""
Copyright:
Copyright Universite of Sherbrooke, departement of biochemistry and
departement of computation.
Date:
Jully 2020
Description:
This script reads all ouput files from G4RNA Screener under the name
'Sequences_WT_xxxxx.csv'. Overlapping windows will be merged. Here are the
columns in the output : Strand, Chromosome, locStart, locEnd, GeneID,
Location, TranscriptID, meancGcC, meanG4H, meanG4NN, pG4Start, pG4End,
G4Sequence.
"""
def mergeOverlappingSequences(dfTmp):
"""Merge the sequences of overlaping windows.
:param dfTmp: contain overlaping windows.
:type dfTmp: dataFrame
:returns: seq, sequence merged.
:rtype: string
"""
dfTmp = dfTmp.sort_values(by=['wStart'])
seq = str(dfTmp.seqG4.iloc[0])
for w in range(1,len(dfTmp)):
stepTmp = int(dfTmp.wStart.iloc[w] - dfTmp.wEnd.iloc[w-1])-1
# convert to int elsewise it's a float
wSeq = dfTmp.seqG4.iloc[w]
seq += wSeq[-stepTmp:]
return seq
def getInfo(df):
"""Retrieves informations of a windows and parse it into a dictionary.
As gene windows and junction windows are not formated the same way, this
function aims to parse them into the same type of dictionary.
:param df: contain all overlaping windows.
:type df: dataFrame
:returns: dico, contains all infromation for one window.
:rtype: dictionary
"""
geneDesc = df.geneDesc.iloc[0]
geneDescSplit = geneDesc.split(':')
dico = {'Gene' : [geneDescSplit[0]],
'meancGcC' : [df.cGcC.mean()],
'meanG4H' : [df.G4H.mean()],
'meanG4NN' : [df.G4NN.mean()],
'pG4Start' : [min(df.wStart)],
'pG4End' : [max(df.wEnd)]}
# dico['Chromosome'] = [geneDescSplit[0]]
# dico['Strand'] = [geneDescSplit[2]]
return dico
def mergeWindows(df):
"""Merge overlaping windows.
:param df: contain overlaping windows.
:type df: dataFrame
:returns: pG4, contains the pG4 which is the merge of overlaping windows.
:rtype: dictionary
"""
pG4rSeq = mergeOverlappingSequences(df)
if len(pG4rSeq) >= 20:
dicoInfo = getInfo(df)
pG4Start = dicoInfo['pG4Start'][0]
pG4End = dicoInfo['pG4End'][0]
pG4 = {}
pG4 = dicoInfo
pG4['pG4Start'] = [min(pG4Start, pG4End)]
pG4['pG4End'] = [max(pG4Start, pG4End)]
pG4['Sequence'] = [pG4rSeq]
pG4['Description'] = [df.geneDesc.iloc[0]]
else:
pG4 = {}
return pG4
def filterOnScores(dicoParam, dfWindows):
"""Filter the windows based on thresholds.
:param dicoParam: contains all parameters that were given to g4rna screener.
:type dicoParam: dictionnary
:param dfWindows: contains all windows of all genes from one specie.
:type dfWindows: dataframe
:returns: dfWindows, with only windows upper thresholds.
:rtype: dataFrame
"""
dfWindows = dfWindows[ dfWindows.cGcC >= dicoParam["cGcC"] ].dropna()
dfWindows = dfWindows[ dfWindows.G4H >= dicoParam["G4H"] ].dropna()
dfWindows = dfWindows[ dfWindows.G4NN >= dicoParam["G4NN"] ].dropna()
return dfWindows
def mergeG4(df, dicoParam):
"""Browses all junction window to find those that are overlapping.
Here we browse all junctions windows. We will only kept those that overlap
the 100 nucleotid. Indeed, if the window over thresholds don't overlap this
position, it only in a gene and not a junction.
:param df: contains all windows.
:type df: dataFrame
:param dicoParam: contains all parameters that were given to g4rna screener.
:type dicoParam: dictionnary
:returns: dfpG4, contain all pG4 for that strand.
:rtype: dataFrame
"""
dfTmp = pd.DataFrame()
dfpG4 = pd.DataFrame()
dfTmp = dfTmp.append(df[0:1]) # store the first window
if len(df) == 1:
pG4 = mergeWindows(dfTmp)
dfTmp = pd.DataFrame.from_dict(pG4)
dfpG4 = dfpG4.append(dfTmp)
else:
for w in range(1,len(df)): # w for window
pG4 = mergeWindows(dfTmp)
# browses all windows over thresholds, exept the first one
if (df.geneDesc.iloc[w] == df.geneDesc.iloc[w-1] and
(df.wStart.iloc[w] >= df.wStart.iloc[w-1] and \
df.wStart.iloc[w] <= df.wEnd.iloc[w-1])):
# if window overlap, add window at the current pG4
dfTmp = dfTmp.append(df[w:w+1])
if w == len(df)-1:
pG4 = mergeWindows(dfTmp)
dfTmp = pd.DataFrame.from_dict(pG4)
dfpG4 = dfpG4.append(dfTmp)
else: # new pG4
pG4 = mergeWindows(dfTmp)
dfTmp = pd.DataFrame.from_dict(pG4)
dfpG4 = dfpG4.append(dfTmp)
dfTmp = df.iloc[w:w+1]
if w == len(df)-1 :
pG4 = mergeWindows(dfTmp)
dfTmp = pd.DataFrame.from_dict(pG4)
dfpG4 = dfpG4.append(dfTmp)
return dfpG4
def merge(filename, dicoParam, repro):
dfpG42 = pd.DataFrame()
try:
dfWindows = pd.read_csv(filename, sep='\t', index_col=0)
except:
# print("This file couldn't be converted in data frame : " + filename)
pass
else:
# dataFrame with all windows from G4RNA Screener
if filename == '/home/anais/Documents/Projet/G4Conservation/reviewTRCentro/saccharomyces_cerevisiae/CSVFile/Sequences_centromere_00001.csv':
print(dfWindows)
dfWindows.columns = ['geneDesc','cGcC',
'G4H','seqG4','wStart',
'wEnd', 'G4NN']
dfWindows = filterOnScores(dicoParam, dfWindows)
if filename == '/home/anais/Documents/Projet/G4Conservation/reviewTRCentro/saccharomyces_cerevisiae/CSVFile/Sequences_centromere_00001.csv':
print(dfWindows)
print('---------')
dfpG42 = dfpG42.append(mergeG4(dfWindows, dicoParam))
dfpG42['Repro'] = repro
return dfpG42
def main(dicoParam, directory, repro):
dfpG4MonoGene = pd.DataFrame()
dfpG4DiGene = pd.DataFrame()
dfpG4TriGene = pd.DataFrame()
dfpG4WT = pd.DataFrame()
for path, dirs, files in os.walk(directory):
for file in files:
if '_00' in file and '.csv' in file and 'centro' in file:
inputfile = directory+'/CSVFile/'+file
print(inputfile)
if '_Mono_' in file:
dfpG4MonoGene = dfpG4MonoGene.append(merge(inputfile, dicoParam, repro))
dfpG4MonoGene = dfpG4MonoGene.reset_index(drop=True)
elif '_Tri_' in file:
dfpG4TriGene = dfpG4TriGene.append(merge(inputfile, dicoParam, repro))
dfpG4TriGene = dfpG4TriGene.reset_index(drop=True)
elif 'Sequences_centromere' in file:
dfpG4WT = dfpG4WT.append(merge(inputfile, dicoParam, repro))
dfpG4WT = dfpG4WT.reset_index(drop=True)
if len(dfpG4MonoGene) > 0:
dfpG4MonoGene = dfpG4MonoGene.drop_duplicates(subset=None, keep='first', inplace=False)
dfpG4MonoGene = dfpG4MonoGene.reset_index(drop=True)
if len(dfpG4TriGene) > 0:
dfpG4TriGene = dfpG4TriGene.drop_duplicates(subset=None, keep='first', inplace=False)
dfpG4TriGene = dfpG4TriGene.reset_index(drop=True)
if len(dfpG4WT) > 0:
dfpG4WT = dfpG4WT.drop_duplicates(subset=None, keep='first', inplace=False)
dfpG4WT = dfpG4WT.reset_index(drop=True)
# dfpG4MonoGene.to_csv(path_or_buf=directory+'/pG4_Shuffle_Mono_Micro.csv', header=True, index=None, sep='\t')
# dfpG4TriGene.to_csv(path_or_buf=directory+'/pG4_Shuffle_Tri_Micro.csv', header=True, index=None, sep='\t')
# dfpG4WT.to_csv(path_or_buf=directory+'/pG4_Shuffle_WT_Micro.csv', header=True, index=None, sep='\t')
dfpG4MonoGene.to_csv(path_or_buf=directory+'/pG4_Shuffle_Mono_Centro.csv', header=True, index=None, sep='\t')
dfpG4TriGene.to_csv(path_or_buf=directory+'/pG4_Shuffle_Tri_Centro.csv', header=True, index=None, sep='\t')
dfpG4WT.to_csv(path_or_buf=directory+'/pG4_Shuffle_WT_Centro.csv', header=True, index=None, sep='\t')
def createDicoParam(arg):
"""Retrieves arguments and put them in a dictionary.
:param arg: contains all arguments given to the script, those are principaly
parameters from G4RNA Screener.
:type arg: arg_parser
:returns: dicoParam, contains all arguments given to the script.
:rtype: dictionary
"""
dicoParam = {"G4H" : float(arg.THRESHOLD_G4H),
"cGcC" : float(arg.THRESHOLD_CGCC),
"G4NN" : float(arg.THRESHOLD_G4NN),
"windowLength" : int(arg.WINDOW),
"step" : int(arg.STEP)}
return dicoParam
def build_arg_parser():
parser = argparse.ArgumentParser(description = 'G4Annotation')
# parser.add_argument ('-p', '--path', default = '/home/vana2406/scratch/'+\
# 'G4Conservation/reviewTRCentro/')
parser.add_argument ('-p', '--path', default = '/home/anais/Documents/Projet/'+\
'G4Conservation/reviewTRCentro/')
parser.add_argument ('-sp', '--specie', default = \
'escherichia_coli_str_k_12_substr_mg1655')
parser.add_argument ('-r', '--repro', default = '1')
parser.add_argument ('-G4H', '--THRESHOLD_G4H', default = 0.9)
parser.add_argument ('-CGCC', '--THRESHOLD_CGCC', default = 4.5)
parser.add_argument ('-G4NN', '--THRESHOLD_G4NN', default = 0.5)
parser.add_argument ('-W', '--WINDOW', default = 60)
parser.add_argument ('-S', '--STEP', default = 10)
return parser
if __name__ == '__main__':
parser = build_arg_parser()
arg = parser.parse_args()
sp = arg.specie
repro = arg.repro
if repro == 'Wt':
path = arg.path+sp
else:
path = arg.path+sp+'/Repro'+repro
print("specie : " + sp)
dicoParam = createDicoParam(arg)
main(dicoParam, path, repro)
print("\tDone")
| UTF-8 | Python | false | false | 9,025 | py | 94 | pG4AnnotationCentro.py | 36 | 0.701496 | 0.678449 | 0 | 262 | 33.446565 | 142 |
ShenghuiXue/django | 11,905,649,354,783 | 0629fa0847b5093dce354f97c315e19809e9d40a | a9d06b567bea638123b03ca3685cad0c40414fb9 | /python_basics/list.py | d5ca141479e53300ef031421bfaac030cbd9fef7 | []
| no_license | https://github.com/ShenghuiXue/django | a1cc3f198c0dd83e555525478ebd43713ad89ae0 | 53dc175f5d00f6ddb4eb7a42807ce31e6b444327 | refs/heads/master | 2020-03-22T10:05:57.723645 | 2018-07-05T17:35:28 | 2018-07-05T17:35:28 | 139,880,798 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | matrix = [[1,2,3],[4,5,6],[7,8,9]]
# list comprehesion
first_col = [row[0] for row in matrix]
print (first_col)
| UTF-8 | Python | false | false | 113 | py | 6 | list.py | 5 | 0.619469 | 0.530973 | 0 | 5 | 21.6 | 38 |
hkristen/datacube-core | 6,390,911,382,554 | 14afff4feacc4db0669bec61820343c02f2f42eb | 8cb57b10a3ccfdf55c4eb2cc2136dcee5750524c | /datacube/index/_datasets.py | a42f6049050535b6b45372d4c5c10ffabb8a0dcc | [
"Apache-2.0"
]
| permissive | https://github.com/hkristen/datacube-core | bc88f0f84f9199887d02068dca7d895e4207bbf6 | 7c1563c086a71c6bea9349af2439459f61a35a94 | refs/heads/develop | 2021-01-23T00:20:12.579584 | 2017-03-29T08:10:35 | 2017-03-29T08:10:35 | 85,717,021 | 0 | 1 | null | true | 2017-03-21T15:12:29 | 2017-03-21T15:12:29 | 2017-03-20T05:47:58 | 2017-03-21T05:10:49 | 34,192 | 0 | 0 | 0 | null | null | null | # coding=utf-8
"""
API for dataset indexing, access and search.
"""
from __future__ import absolute_import
import logging
import warnings
from collections import namedtuple
from uuid import UUID
from cachetools.func import lru_cache
from datacube import compat
from datacube.index.fields import Field
from datacube.model import Dataset, DatasetType, MetadataType
from datacube.utils import InvalidDocException, jsonify_document, changes
from datacube.utils.changes import get_doc_changes, check_doc_unchanged
from . import fields
from .exceptions import DuplicateRecordError
_LOG = logging.getLogger(__name__)
try:
from typing import Any, Iterable, Mapping, Set, Tuple, Union
except ImportError:
pass
# It's a public api, so we can't reorganise old methods.
# pylint: disable=too-many-public-methods, too-many-lines
class MetadataTypeResource(object):
def __init__(self, db):
"""
:type db: datacube.index.postgres._connections.PostgresDb
"""
self._db = db
def from_doc(self, definition):
"""
:param dict definition:
:rtype: datacube.model.MetadataType
"""
MetadataType.validate(definition)
return self._make(definition)
def add(self, metadata_type, allow_table_lock=False):
"""
:param datacube.model.MetadataType metadata_type:
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slightly slower and cannot be done in a transaction.
:rtype: datacube.model.MetadataType
"""
# This column duplication is getting out of hand:
MetadataType.validate(metadata_type.definition)
existing = self.get_by_name(metadata_type.name)
if existing:
# They've passed us the same one again. Make sure it matches what is stored.
check_doc_unchanged(
existing.definition,
jsonify_document(metadata_type.definition),
'Metadata Type {}'.format(metadata_type.name)
)
else:
with self._db.connect() as connection:
connection.add_metadata_type(
name=metadata_type.name,
definition=metadata_type.definition,
concurrently=not allow_table_lock
)
return self.get_by_name(metadata_type.name)
def can_update(self, metadata_type, allow_unsafe_updates=False):
"""
Check if metadata type can be updated. Return bool,safe_changes,unsafe_changes
Safe updates currently allow new search fields to be added, description to be changed.
:param datacube.model.MetadataType metadata_type: updated MetadataType
:param bool allow_unsafe_updates: Allow unsafe changes. Use with caution.
:rtype: bool,list[change],list[change]
"""
MetadataType.validate(metadata_type.definition)
existing = self.get_by_name(metadata_type.name)
if not existing:
raise ValueError('Unknown metadata type %s, cannot update – did you intend to add it?' % metadata_type.name)
updates_allowed = {
('description',): changes.allow_any,
# You can add new fields safely but not modify existing ones.
('dataset',): changes.allow_extension,
('dataset', 'search_fields'): changes.allow_extension
}
doc_changes = get_doc_changes(existing.definition, jsonify_document(metadata_type.definition))
good_changes, bad_changes = changes.classify_changes(doc_changes, updates_allowed)
return allow_unsafe_updates or not bad_changes, good_changes, bad_changes
def update(self, metadata_type, allow_unsafe_updates=False, allow_table_lock=False):
"""
Update a metadata type from the document. Unsafe changes will throw a ValueError by default.
Safe updates currently allow new search fields to be added, description to be changed.
:param datacube.model.MetadataType metadata_type: updated MetadataType
:param bool allow_unsafe_updates: Allow unsafe changes. Use with caution.
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slower and cannot be done in a transaction.
:rtype: datacube.model.MetadataType
"""
can_update, safe_changes, unsafe_changes = self.can_update(metadata_type, allow_unsafe_updates)
if not safe_changes and not unsafe_changes:
_LOG.info("No changes detected for metadata type %s", metadata_type.name)
return
if not can_update:
full_message = "Unsafe changes at " + ", ".join(".".join(offset) for offset, _, _ in unsafe_changes)
raise ValueError(full_message)
_LOG.info("Updating metadata type %s", metadata_type.name)
for offset, old_val, new_val in safe_changes:
_LOG.info("Safe change from %r to %r", old_val, new_val)
for offset, old_val, new_val in unsafe_changes:
_LOG.info("Unsafe change from %r to %r", old_val, new_val)
with self._db.connect() as connection:
connection.update_metadata_type(
name=metadata_type.name,
definition=metadata_type.definition,
concurrently=not allow_table_lock
)
self.get_by_name_unsafe.cache_clear()
self.get_unsafe.cache_clear()
def update_document(self, definition, allow_unsafe_updates=False):
"""
Update a metadata type from the document. Unsafe changes will throw a ValueError by default.
Safe updates currently allow new search fields to be added, description to be changed.
:param dict definition: Updated definition
:param bool allow_unsafe_updates: Allow unsafe changes. Use with caution.
:rtype: datacube.model.MetadataType
"""
return self.update(self.from_doc(definition), allow_unsafe_updates=allow_unsafe_updates)
def get(self, id_):
"""
:rtype: datacube.model.MetadataType
"""
try:
return self.get_unsafe(id_)
except KeyError:
return None
def get_by_name(self, name):
"""
:rtype: datacube.model.MetadataType
"""
try:
return self.get_by_name_unsafe(name)
except KeyError:
return None
@lru_cache()
def get_unsafe(self, id_):
with self._db.connect() as connection:
record = connection.get_metadata_type(id_)
if record is None:
raise KeyError('%s is not a valid MetadataType id')
return self._make_from_query_row(record)
@lru_cache()
def get_by_name_unsafe(self, name):
with self._db.connect() as connection:
record = connection.get_metadata_type_by_name(name)
if not record:
raise KeyError('%s is not a valid MetadataType name' % name)
return self._make_from_query_row(record)
def check_field_indexes(self, allow_table_lock=False, rebuild_all=None,
rebuild_views=False, rebuild_indexes=False):
"""
Create or replace per-field indexes and views.
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slightly slower and cannot be done in a transaction.
"""
if rebuild_all is not None:
warnings.warn(
"The rebuild_all option of check_field_indexes() is deprecated.",
"Instead, use rebuild_views=True or rebuild_indexes=True as needed.",
DeprecationWarning)
rebuild_views = rebuild_indexes = rebuild_all
with self._db.connect() as connection:
connection.check_dynamic_fields(
concurrently=not allow_table_lock,
rebuild_indexes=rebuild_indexes,
rebuild_views=rebuild_views,
)
def get_all(self):
"""
Retrieve all Metadata Types
:rtype: iter[datacube.model.MetadataType]
"""
with self._db.connect() as connection:
return self._make_many(connection.get_all_metadata_types())
def _make_many(self, query_rows):
"""
:rtype: list[datacube.model.MetadataType]
"""
return (self._make_from_query_row(c) for c in query_rows)
def _make_from_query_row(self, query_row):
"""
:rtype: datacube.model.MetadataType
"""
return self._make(query_row['definition'], query_row['id'])
def _make(self, definition, id_=None):
"""
:param dict definition:
:param int id_:
:rtype: datacube.model.MetadataType
"""
return MetadataType(
definition,
dataset_search_fields=self._db.get_dataset_fields(definition['dataset']['search_fields']),
id_=id_
)
class ProductResource(object):
"""
:type _db: datacube.index.postgres._connections.PostgresDb
:type metadata_type_resource: MetadataTypeResource
"""
def __init__(self, db, metadata_type_resource):
"""
:type db: datacube.index.postgres._connections.PostgresDb
:type metadata_type_resource: MetadataTypeResource
"""
self._db = db
self.metadata_type_resource = metadata_type_resource
def from_doc(self, definition):
"""
Create a Product from its definitions
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
# This column duplication is getting out of hand:
DatasetType.validate(definition)
metadata_type = definition['metadata_type']
# They either specified the name of a metadata type, or specified a metadata type.
# Is it a name?
if isinstance(metadata_type, compat.string_types):
metadata_type = self.metadata_type_resource.get_by_name(metadata_type)
else:
# Otherwise they embedded a document, add it if needed:
metadata_type = self.metadata_type_resource.from_doc(metadata_type)
definition = definition.copy()
definition['metadata_type'] = metadata_type.name
if not metadata_type:
raise InvalidDocException('Unknown metadata type: %r' % definition['metadata_type'])
return DatasetType(metadata_type, definition)
def add(self, type_, allow_table_lock=False):
"""
Add a Product.
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slightly slower and cannot be done in a transaction.
:param datacube.model.DatasetType type_: Product to add
:rtype: datacube.model.DatasetType
"""
DatasetType.validate(type_.definition)
existing = self.get_by_name(type_.name)
if existing:
check_doc_unchanged(
existing.definition,
jsonify_document(type_.definition),
'Metadata Type {}'.format(type_.name)
)
else:
metadata_type = self.metadata_type_resource.get_by_name(type_.metadata_type.name)
if metadata_type is None:
_LOG.warning('Adding metadata_type "%s" as it doesn\'t exist.', type_.metadata_type.name)
metadata_type = self.metadata_type_resource.add(type_.metadata_type, allow_table_lock=allow_table_lock)
with self._db.connect() as connection:
connection.add_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=metadata_type.id,
search_fields=metadata_type.dataset_fields,
definition=type_.definition,
concurrently=not allow_table_lock,
)
return self.get_by_name(type_.name)
def can_update(self, product, allow_unsafe_updates=False):
"""
Check if product can be updated. Return bool,safe_changes,unsafe_changes
(An unsafe change is anything that may potentially make the product
incompatible with existing datasets of that type)
:param datacube.model.DatasetType product: Product to update
:param bool allow_unsafe_updates: Allow unsafe changes. Use with caution.
:rtype: bool,list[change],list[change]
"""
DatasetType.validate(product.definition)
existing = self.get_by_name(product.name)
if not existing:
raise ValueError('Unknown product %s, cannot update – did you intend to add it?' % product.name)
updates_allowed = {
('description',): changes.allow_any,
('metadata_type',): changes.allow_any,
# You can safely make the match rules looser but not tighter.
# Tightening them could exclude datasets already matched to the product.
# (which would make search results wrong)
('metadata',): changes.allow_truncation
}
doc_changes = get_doc_changes(existing.definition, jsonify_document(product.definition))
good_changes, bad_changes = changes.classify_changes(doc_changes, updates_allowed)
return allow_unsafe_updates or not bad_changes, good_changes, bad_changes
def update(self, product, allow_unsafe_updates=False, allow_table_lock=False):
"""
Update a product. Unsafe changes will throw a ValueError by default.
(An unsafe change is anything that may potentially make the product
incompatible with existing datasets of that type)
:param datacube.model.DatasetType product: Product to update
:param bool allow_unsafe_updates: Allow unsafe changes. Use with caution.
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slower and cannot be done in a transaction.
:rtype: datacube.model.DatasetType
"""
can_update, safe_changes, unsafe_changes = self.can_update(product, allow_unsafe_updates)
if not safe_changes and not unsafe_changes:
_LOG.info("No changes detected for product %s", product.name)
return
if not can_update:
full_message = "Unsafe changes at " + ", ".join(".".join(offset) for offset, _, _ in unsafe_changes)
raise ValueError(full_message)
_LOG.info("Updating product %s", product.name)
for offset, old_val, new_val in safe_changes:
_LOG.info("Safe change from %r to %r", old_val, new_val)
for offset, old_val, new_val in unsafe_changes:
_LOG.info("Unsafe change from %r to %r", old_val, new_val)
existing = self.get_by_name(product.name)
changing_metadata_type = product.metadata_type.name != existing.metadata_type.name
if changing_metadata_type:
raise ValueError("Unsafe change: cannot (currently) switch metadata types for a product")
# TODO: Ask Jeremy WTF is going on here
# If the two metadata types declare the same field with different postgres expressions
# we can't safely change it.
# (Replacing the index would cause all existing users to have no effective index)
# for name, field in existing.metadata_type.dataset_fields.items():
# new_field = type_.metadata_type.dataset_fields.get(name)
# if new_field and (new_field.sql_expression != field.sql_expression):
# declare_unsafe(
# ('metadata_type',),
# 'Metadata type change results in incompatible index '
# 'for {!r} ({!r} → {!r})'.format(
# name, field.sql_expression, new_field.sql_expression
# )
# )
metadata_type = self.metadata_type_resource.get_by_name(product.metadata_type.name)
# TODO: should we add metadata type here?
assert metadata_type, "TODO: should we add metadata type here?"
with self._db.connect() as conn:
conn.update_dataset_type(
name=product.name,
metadata=product.metadata_doc,
metadata_type_id=metadata_type.id,
search_fields=metadata_type.dataset_fields,
definition=product.definition,
update_metadata_type=changing_metadata_type,
concurrently=not allow_table_lock
)
self.get_by_name_unsafe.cache_clear()
self.get_unsafe.cache_clear()
def update_document(self, definition, allow_unsafe_updates=False, allow_table_lock=False):
"""
Update a Product using its definition
:param bool allow_unsafe_updates: Allow unsafe changes. Use with caution.
:param dict definition: product definition document
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slower and cannot be done in a transaction.
:rtype: datacube.model.DatasetType
"""
type_ = self.from_doc(definition)
return self.update(
type_,
allow_unsafe_updates=allow_unsafe_updates,
allow_table_lock=allow_table_lock,
)
def add_document(self, definition):
"""
Add a Product using its difinition
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
type_ = self.from_doc(definition)
return self.add(type_)
def get(self, id_):
"""
Retrieve Product by id
:param int id_: id of the Product
:rtype: datacube.model.DatasetType
"""
try:
return self.get_unsafe(id_)
except KeyError:
return None
def get_by_name(self, name):
"""
Retrieve Product by name
:param str name: name of the Product
:rtype: datacube.model.DatasetType
"""
try:
return self.get_by_name_unsafe(name)
except KeyError:
return None
@lru_cache()
def get_unsafe(self, id_):
with self._db.connect() as connection:
result = connection.get_dataset_type(id_)
if not result:
raise KeyError('"%s" is not a valid Product id' % id_)
return self._make(result)
@lru_cache()
def get_by_name_unsafe(self, name):
with self._db.connect() as connection:
result = connection.get_dataset_type_by_name(name)
if not result:
raise KeyError('"%s" is not a valid Product name' % name)
return self._make(result)
def get_with_fields(self, field_names):
"""
Return dataset types that have all the given fields.
:param tuple[str] field_names:
:rtype: __generator[DatasetType]
"""
for type_ in self.get_all():
for name in field_names:
if name not in type_.metadata_type.dataset_fields:
break
else:
yield type_
def search(self, **query):
"""
Return dataset types that have all the given fields.
:param dict query:
:rtype: __generator[DatasetType]
"""
for type_, q in self.search_robust(**query):
if not q:
yield type_
def search_robust(self, **query):
"""
Return dataset types that match match-able fields and dict of remaining un-matchable fields.
:param dict query:
:rtype: __generator[(DatasetType, dict)]
"""
def _listify(v):
return v if isinstance(v, list) else [v]
for type_ in self.get_all():
remaining_matchable = query.copy()
# If they specified specific product/metadata-types, we can quickly skip non-matches.
if type_.name not in _listify(remaining_matchable.pop('product', type_.name)):
continue
if type_.metadata_type.name not in _listify(remaining_matchable.pop('metadata_type',
type_.metadata_type.name)):
continue
# Check that all the keys they specified match this product.
for key, value in list(remaining_matchable.items()):
field = type_.metadata_type.dataset_fields.get(key)
if not field:
# This type doesn't have that field, so it cannot match.
break
if not hasattr(field, 'extract'):
# non-document/native field
continue
if field.extract(type_.metadata_doc) is None:
# It has this field but it's not defined in the type doc, so it's unmatchable.
continue
expr = fields.as_expression(field, value)
if expr.evaluate(type_.metadata_doc):
remaining_matchable.pop(key)
else:
# A property doesn't match this type, skip to next type.
break
else:
yield type_, remaining_matchable
def get_all(self):
# type: () -> Iterable[DatasetType]
"""
Retrieve all Products
:rtype: iter[datacube.model.DatasetType]
"""
with self._db.connect() as connection:
return (self._make(record) for record in connection.get_all_dataset_types())
def _make_many(self, query_rows):
return (self._make(c) for c in query_rows)
def _make(self, query_row):
"""
:rtype datacube.model.DatasetType
"""
return DatasetType(
definition=query_row['definition'],
metadata_type=self.metadata_type_resource.get(query_row['metadata_type_ref']),
id_=query_row['id'],
)
class DatasetResource(object):
"""
:type _db: datacube.index.postgres._connections.PostgresDb
:type types: datacube.index._datasets.ProductResource
"""
def __init__(self, db, dataset_type_resource):
"""
:type db: datacube.index.postgres._connections.PostgresDb
:type dataset_type_resource: datacube.index._datasets.ProductResource
"""
self._db = db
self.types = dataset_type_resource
def get(self, id_, include_sources=False):
"""
Get dataset by id
:param UUID id_: id of the dataset to retrieve
:param bool include_sources: get the full provenance graph?
:rtype: datacube.model.Dataset
"""
if isinstance(id_, compat.string_types):
id_ = UUID(id_)
with self._db.connect() as connection:
if not include_sources:
dataset = connection.get_dataset(id_)
return self._make(dataset, full_info=True) if dataset else None
datasets = {result['id']: (self._make(result, full_info=True), result)
for result in connection.get_dataset_sources(id_)}
if not datasets:
# No dataset found
return None
for dataset, result in datasets.values():
dataset.metadata_doc['lineage']['source_datasets'] = {
classifier: datasets[source][0].metadata_doc
for source, classifier in zip(result['sources'], result['classes']) if source
}
dataset.sources = {
classifier: datasets[source][0]
for source, classifier in zip(result['sources'], result['classes']) if source
}
return datasets[id_][0]
def get_derived(self, id_):
"""
Get all derived datasets
:param UUID id_: dataset id
:rtype: list[datacube.model.Dataset]
"""
with self._db.connect() as connection:
return [self._make(result, full_info=True)
for result in connection.get_derived_datasets(id_)]
def has(self, id_):
"""
Have we already indexed this dataset?
:param typing.Union[UUID, str] id_: dataset id
:rtype: bool
"""
with self._db.connect() as connection:
return connection.contains_dataset(id_)
def add(self, dataset, skip_sources=False, sources_policy='verify'):
"""
Ensure a dataset is in the index. Add it if not present.
:param datacube.model.Dataset dataset: dataset to add
:param str sources_policy: one of 'verify' - verify the metadata, 'ensure' - add if doesn't exist, 'skip' - skip
:param bool skip_sources: don't attempt to index source datasets (use when sources are already indexed)
:rtype: datacube.model.Dataset
"""
if skip_sources:
warnings.warn('"skip_sources" is deprecated, use "sources_policy"', DeprecationWarning)
sources_policy = 'skip'
self._add_sources(dataset, sources_policy)
sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources
dataset.type.dataset_reader(dataset.metadata_doc).sources = {}
try:
_LOG.info('Indexing %s', dataset.id)
if not self._try_add(dataset):
existing = self.get(dataset.id)
if existing:
check_doc_unchanged(
existing.metadata_doc,
jsonify_document(dataset.metadata_doc),
'Dataset {}'.format(dataset.id)
)
# reinsert attempt? try updating the location
if dataset.local_uri:
try:
with self._db.connect() as connection:
connection.ensure_dataset_location(dataset.id, dataset.local_uri)
except DuplicateRecordError as e:
_LOG.warning(str(e))
finally:
dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp
return dataset
def search_product_duplicates(self, product, *group_fields):
# type: (DatasetType, Iterable[Union[str, Field]]) -> Iterable[tuple, Set[UUID]]
"""
Find dataset ids who have duplicates of the given set of field names.
Product is always inserted as the first grouping field.
Returns each set of those field values and the datasets that have them.
"""
def load_field(f):
# type: (Union[str, Field]) -> Field
if isinstance(f, compat.string_types):
return product.metadata_type.dataset_fields[f]
assert isinstance(f, Field), "Not a field: %r" % (f,)
return f
group_fields = [load_field(f) for f in group_fields]
result_type = namedtuple('search_result', (f.name for f in group_fields))
expressions = [product.metadata_type.dataset_fields.get('product') == product.name]
with self._db.connect() as connection:
for record in connection.get_duplicates(group_fields, expressions):
dataset_ids = set(record[0])
grouped_fields = tuple(record[1:])
yield result_type(*grouped_fields), dataset_ids
def _add_sources(self, dataset, sources_policy='verify'):
if dataset.sources is None:
raise ValueError("Dataset has missing (None) sources. Was this loaded without include_sources=True?")
if sources_policy == 'ensure':
for source in dataset.sources.values():
if not self.has(source.id):
self.add(source, sources_policy=sources_policy)
elif sources_policy == 'verify':
for source in dataset.sources.values():
self.add(source, sources_policy=sources_policy)
elif sources_policy != 'skip':
raise ValueError('sources_policy must be one of ("verify", "ensure", "skip")')
def can_update(self, dataset, updates_allowed=None):
"""
Check if dataset can be updated. Return bool,safe_changes,unsafe_changes
:param datacube.model.Dataset dataset: Dataset to update
:param dict updates_allowed: Allowed updates
:rtype: bool,list[change],list[change]
"""
existing = self.get(dataset.id, include_sources=True)
if not existing:
raise ValueError('Unknown dataset %s, cannot update – did you intend to add it?' % dataset.id)
if dataset.type.name != existing.type.name:
raise ValueError('Changing product is not supported. From %s to %s in %s' % (existing.type.name,
dataset.type.name,
dataset.id))
# TODO: figure out (un)safe changes from metadata type?
allowed = {
# can always add more metadata
tuple(): changes.allow_extension,
}
allowed.update(updates_allowed or {})
doc_changes = get_doc_changes(existing.metadata_doc, jsonify_document(dataset.metadata_doc))
good_changes, bad_changes = changes.classify_changes(doc_changes, allowed)
return not bad_changes, good_changes, bad_changes
def update(self, dataset, updates_allowed=None):
"""
Update dataset metadata and location
:param datacube.model.Dataset dataset: Dataset to update
:param updates_allowed: Allowed updates
:return:
"""
existing = self.get(dataset.id)
can_update, safe_changes, unsafe_changes = self.can_update(dataset, updates_allowed)
if not safe_changes and not unsafe_changes:
if dataset.local_uri != existing.local_uri:
with self._db.begin() as transaction:
transaction.ensure_dataset_location(dataset.id, dataset.local_uri)
_LOG.info("No changes detected for dataset %s", dataset.id)
return
if not can_update:
full_message = "Unsafe changes at " + ", ".join(".".join(offset) for offset, _, _ in unsafe_changes)
raise ValueError(full_message)
_LOG.info("Updating dataset %s", dataset.id)
for offset, old_val, new_val in safe_changes:
_LOG.info("Safe change from %r to %r", old_val, new_val)
for offset, old_val, new_val in unsafe_changes:
_LOG.info("Unsafe change from %r to %r", old_val, new_val)
sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources
dataset.type.dataset_reader(dataset.metadata_doc).sources = {}
try:
product = self.types.get_by_name(dataset.type.name)
with self._db.begin() as transaction:
if not transaction.update_dataset(dataset.metadata_doc, dataset.id, product.id):
raise ValueError("Failed to update dataset %s..." % dataset.id)
if dataset.local_uri != existing.local_uri:
transaction.ensure_dataset_location(dataset.id, dataset.local_uri)
finally:
dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp
return dataset
def archive(self, ids):
"""
Mark datasets as archived
:param list[UUID] ids: list of dataset ids to archive
"""
with self._db.begin() as transaction:
for id_ in ids:
transaction.archive_dataset(id_)
def restore(self, ids):
"""
Mark datasets as not archived
:param list[UUID] ids: list of dataset ids to restore
"""
with self._db.begin() as transaction:
for id_ in ids:
transaction.restore_dataset(id_)
def get_field_names(self, type_name=None):
"""
:param str type_name:
:rtype: set[str]
"""
if type_name is None:
types = self.types.get_all()
else:
types = [self.types.get_by_name(type_name)]
out = set()
for type_ in types:
out.update(type_.metadata_type.dataset_fields)
return out
def get_locations(self, id_):
"""
:param typing.Union[UUID, str] id_: dataset id
:rtype: list[str]
"""
if isinstance(id_, Dataset):
warnings.warn("Passing dataset is deprecated after 1.2.2, pass dataset.id", DeprecationWarning)
id_ = id_.id
with self._db.connect() as connection:
return connection.get_locations(id_)
def add_location(self, id_, uri):
"""
Add a location to the dataset if it doesn't already exist.
:param typing.Union[UUID, str] id_: dataset id
:param str uri: fully qualified uri
:returns bool: Was one added?
"""
if isinstance(id_, Dataset):
warnings.warn("Passing dataset is deprecated after 1.2.2, pass dataset.id", DeprecationWarning)
id_ = id_.id
with self._db.connect() as connection:
try:
connection.ensure_dataset_location(id_, uri)
return True
except DuplicateRecordError:
return False
def get_datasets_for_location(self, uri):
with self._db.connect() as connection:
return (self._make(row) for row in connection.get_datasets_for_location(uri))
def remove_location(self, id_, uri):
"""
Remove a location from the dataset if it exists.
:param typing.Union[UUID, str] id_: dataset id
:param str uri: fully qualified uri
:returns bool: Was one removed?
"""
if isinstance(id_, Dataset):
warnings.warn("Passing dataset is deprecated after 1.2.2, pass dataset.id", DeprecationWarning)
id_ = id_.id
with self._db.connect() as connection:
was_removed = connection.remove_location(id_, uri)
return was_removed
def _make(self, dataset_res, full_info=False):
"""
:rtype datacube.model.Dataset
:param bool full_info: Include all available fields
"""
uri = dataset_res.uri
return Dataset(
self.types.get(dataset_res.dataset_type_ref),
dataset_res.metadata,
# We guarantee that this property on the class is only a local uri.
uri if uri and uri.startswith('file:') else None,
indexed_by=dataset_res.added_by if full_info else None,
indexed_time=dataset_res.added if full_info else None,
archived_time=dataset_res.archived
)
def _make_many(self, query_result):
"""
:rtype list[datacube.model.Dataset]
"""
return (self._make(dataset) for dataset in query_result)
def search_by_metadata(self, metadata):
"""
Perform a search using arbitrary metadata, returning results as Dataset objects.
Caution – slow! This will usually not use indexes.
:param dict metadata:
:rtype: list[datacube.model.Dataset]
"""
with self._db.connect() as connection:
for dataset in self._make_many(connection.search_datasets_by_metadata(metadata)):
yield dataset
def search(self, **query):
"""
Perform a search, returning results as Dataset objects.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: __generator[datacube.model.Dataset]
"""
source_filter = query.pop('source_filter', None)
for _, datasets in self._do_search_by_product(query, source_filter=source_filter):
for dataset in self._make_many(datasets):
yield dataset
def search_by_product(self, **query):
"""
Perform a search, returning datasets grouped by product type.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: __generator[(datacube.model.DatasetType, __generator[datacube.model.Dataset])]]
"""
for product, datasets in self._do_search_by_product(query):
yield product, self._make_many(datasets)
def search_returning(self, field_names, **query):
"""
Perform a search, returning only the specified fields.
This method can be faster than normal search() if you don't need all fields of each dataset.
It also allows for returning rows other than datasets, such as a row per uri when requesting field 'uri'.
:param tuple[str] field_names:
:param dict[str,str|float|datacube.model.Range] query:
:returns __generator[tuple]: sequence of results, each result is a namedtuple of your requested fields
"""
result_type = namedtuple('search_result', field_names)
for _, results in self._do_search_by_product(query,
return_fields=True,
select_field_names=field_names):
for columns in results:
yield result_type(*columns)
def count(self, **query):
"""
Perform a search, returning count of results.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: int
"""
# This may be optimised into one query in the future.
result = 0
for product_type, count in self._do_count_by_product(query):
result += count
return result
def count_by_product(self, **query):
"""
Perform a search, returning a count of for each matching product type.
:param dict[str,str|float|datacube.model.Range] query:
:returns: Sequence of (product, count)
:rtype: __generator[(datacube.model.DatasetType, int)]]
"""
return self._do_count_by_product(query)
def count_by_product_through_time(self, period, **query):
"""
Perform a search, returning counts for each product grouped in time slices
of the given period.
:param dict[str,str|float|datacube.model.Range] query:
:param str period: Time range for each slice: '1 month', '1 day' etc.
:returns: For each matching product type, a list of time ranges and their count.
:rtype: __generator[(datacube.model.DatasetType, list[(datetime.datetime, datetime.datetime), int)]]
"""
return self._do_time_count(period, query)
def count_product_through_time(self, period, **query):
"""
Perform a search, returning counts for a single product grouped in time slices
of the given period.
Will raise an error if the search terms match more than one product.
:param dict[str,str|float|datacube.model.Range] query:
:param str period: Time range for each slice: '1 month', '1 day' etc.
:returns: For each matching product type, a list of time ranges and their count.
:rtype: list[(str, list[(datetime.datetime, datetime.datetime), int)]]
"""
return next(self._do_time_count(period, query, ensure_single=True))[1]
def _try_add(self, dataset):
was_inserted = False
product = self.types.get_by_name(dataset.type.name)
if product is None:
_LOG.warning('Adding product "%s" as it doesn\'t exist.', dataset.type.name)
product = self.types.add(dataset.type)
if dataset.sources is None:
raise ValueError("Dataset has missing (None) sources. Was this loaded without include_sources=True?")
with self._db.begin() as transaction:
try:
was_inserted = transaction.insert_dataset(dataset.metadata_doc, dataset.id, product.id)
for classifier, source_dataset in dataset.sources.items():
transaction.insert_dataset_source(classifier, dataset.id, source_dataset.id)
# try to update location in the same transaction as insertion.
# if insertion fails we'll try updating location later
# if insertion succeeds the location bit can't possibly fail
if dataset.local_uri:
transaction.ensure_dataset_location(dataset.id, dataset.local_uri)
except DuplicateRecordError as e:
_LOG.warning(str(e))
return was_inserted
def _get_dataset_types(self, q):
types = set()
if 'product' in q.keys():
types.add(self.types.get_by_name(q['product']))
else:
# Otherwise search any metadata type that has all the given search fields.
types = self.types.get_with_fields(tuple(q.keys()))
if not types:
raise ValueError('No type of dataset has fields: %r', tuple(q.keys()))
return types
def _get_product_queries(self, query):
for product, q in self.types.search_robust(**query):
q['dataset_type_id'] = product.id
yield q, product
def _do_search_by_product(self, query, return_fields=False, select_field_names=None,
with_source_ids=False, source_filter=None):
if source_filter:
product_queries = list(self._get_product_queries(source_filter))
if not product_queries:
# No products match our source filter, so there will be no search results regardless.
_LOG.info("No products match source filter")
return
if len(product_queries) > 1:
raise RuntimeError("Multi-product source filters are not supported. Try adding 'product' field")
source_queries, source_product = product_queries[0]
dataset_fields = source_product.metadata_type.dataset_fields
source_exprs = tuple(fields.to_expressions(dataset_fields.get, **source_queries))
else:
source_exprs = None
product_queries = list(self._get_product_queries(query))
with self._db.connect() as connection:
for q, product in product_queries:
dataset_fields = product.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
select_fields = None
if return_fields:
# if no fields specified, select all
if select_field_names is None:
select_fields = tuple(field for name, field in dataset_fields.items()
if not field.affects_row_selection)
else:
select_fields = tuple(dataset_fields[field_name]
for field_name in select_field_names)
yield (product,
connection.search_datasets(
query_exprs,
source_exprs,
select_fields=select_fields,
with_source_ids=with_source_ids
))
def _do_count_by_product(self, query):
product_queries = self._get_product_queries(query)
with self._db.connect() as connection:
for q, product in product_queries:
dataset_fields = product.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
count = connection.count_datasets(query_exprs)
if count > 0:
yield product, count
def _do_time_count(self, period, query, ensure_single=False):
if 'time' not in query:
raise ValueError('Counting through time requires a "time" range query argument')
query = dict(query)
start, end = query['time']
del query['time']
product_queries = list(self._get_product_queries(query))
if ensure_single:
if len(product_queries) == 0:
raise ValueError('No products match search terms: %r' % query)
if len(product_queries) > 1:
raise ValueError('Multiple products match single query search: %r' %
([dt.name for q, dt in product_queries],))
with self._db.connect() as connection:
for q, product in product_queries:
dataset_fields = product.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
yield product, list(connection.count_datasets_through_time(
start,
end,
period,
dataset_fields.get('time'),
query_exprs
))
def search_summaries(self, **query):
"""
Perform a search, returning just the search fields of each dataset.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: __generator[dict]
"""
for _, results in self._do_search_by_product(query, return_fields=True):
for columns in results:
yield dict(columns)
def search_eager(self, **query):
"""
Perform a search, returning results as Dataset objects.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: list[datacube.model.Dataset]
"""
return list(self.search(**query))
| UTF-8 | Python | false | false | 46,500 | py | 23 | _datasets.py | 14 | 0.592988 | 0.592428 | 0 | 1,172 | 38.667235 | 120 |
achacha/AOS | 3,693,671,898,419 | 1542a46418102dcb78c5525d8703f5f205599d9e | 23553b6bcf0a278776f0a651b24a3c6a22a702f9 | /ALibrary/call_gather_headers.py | 36f0b093c53062952ffa51be7745b19e4dcf99f9 | []
| no_license | https://github.com/achacha/AOS | 24ace04a4228de4d0295379d84cb936c2a33511b | bea07af94f23982d4fceb10a9cf70a7e5467355f | refs/heads/master | 2020-05-17T12:20:35.667655 | 2016-02-10T13:33:43 | 2016-02-10T13:33:43 | 3,578,772 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import os,sys;
BASE_ALIBRARY_PATH=os.path.dirname(os.path.abspath(sys.argv[0]));
#print("BASE_ALIBRARY_PATH="+BASE_ALIBRARY_PATH);
BASE_ENV_PATH=os.path.normpath(os.path.join(BASE_ALIBRARY_PATH,"..","_devtools","bin"));
params="";
for param in sys.argv[1:]:
params = params + param + " ";
os.chdir(BASE_ENV_PATH);
os.system(os.path.join(BASE_ENV_PATH, "gather_headers.py "+params));
| UTF-8 | Python | false | false | 414 | py | 731 | call_gather_headers.py | 615 | 0.678744 | 0.673913 | 0 | 13 | 30.230769 | 88 |
Buor/python-codewars-solutions | 13,572,096,682,866 | 8cd91cd6248b089a01989e083a87e0315fe52cfd | dc37b0ba02bcb195a71988563ec461e3181c2811 | /kyu6/highest_scoring_word.py | 8d93350d8363e3c15810408987507896764aef92 | []
| no_license | https://github.com/Buor/python-codewars-solutions | 08506a24791adea4be8cb7c4479e40ffd604975f | 965218cf18ed132aa2b2f570e165fae79b73e4f0 | refs/heads/master | 2023-08-13T02:01:29.151321 | 2021-09-10T11:36:21 | 2021-09-10T11:36:21 | 402,838,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://www.codewars.com/kata/57eb8fcdf670e99d9b000272
from functools import reduce
high = lambda x: max(x.split(" "), key=lambda y: reduce(lambda z, q: z + q, map(lambda w: ord(w), y)) - len(y) * 96) | UTF-8 | Python | false | false | 202 | py | 10 | highest_scoring_word.py | 10 | 0.688119 | 0.60396 | 0 | 3 | 66.666667 | 116 |
dgretton/pyhamilton | 2,920,577,794,780 | bd766a7e7e29e47a9ca663e8f494bb897cb293b1 | e08bb81a0018d234cc78f365cc4aa64409ef5425 | /examples/96_head_pickup_eject.py | b1d50d806989a3d22f93361b77276e46843d800e | [
"MIT"
]
| permissive | https://github.com/dgretton/pyhamilton | d3b9ffcdbcd8c50f412d27ef5710e6e1dd3c5265 | 4688977308789876ab8163af6f704aa118ab39bd | refs/heads/master | 2023-08-07T16:19:42.086216 | 2023-07-24T17:51:16 | 2023-07-24T17:51:16 | 134,626,220 | 134 | 35 | MIT | false | 2023-05-20T14:50:02 | 2018-05-23T21:14:49 | 2023-05-05T15:22:55 | 2023-05-20T14:50:01 | 66,642 | 116 | 28 | 2 | Python | false | false | import os
from pyhamilton import (HamiltonInterface, LayoutManager, ResourceType, Tip96,
INITIALIZE, PICKUP96, EJECT96)
layfile = os.path.abspath(os.path.join('.', '96_head_pickup_eject.lay'))
lmgr = LayoutManager(layfile)
tip_name_from_line = lambda line: LayoutManager.layline_first_field(line)
tip_name_condition = lambda line: LayoutManager.field_starts_with(tip_name_from_line(line), 'STF_L_')
tips_type = ResourceType(Tip96, tip_name_condition, tip_name_from_line)
tips = lmgr.assign_unused_resource(tips_type)
if __name__ == '__main__':
with HamiltonInterface() as hammy:
print('INITIALIZED!!', hammy.wait_on_response(hammy.send_command(INITIALIZE)))
labware = str(tips.layout_name())
labware_poss = '; '.join([labware + ',' + str(i+1) for i in range(96)]) + ';'
# A dictionary can be unpacked into the command...
cmd_dict = {'labwarePositions':labware_poss}
id = hammy.send_command(PICKUP96, **cmd_dict)
print(hammy.wait_on_response(id, raise_first_exception=True))
# Or the command fields can be specified with keyword arguments
id = hammy.send_command(EJECT96, labwarePositions=labware_poss)
print(hammy.wait_on_response(id, raise_first_exception=True))
| UTF-8 | Python | false | false | 1,257 | py | 46 | 96_head_pickup_eject.py | 30 | 0.693715 | 0.680191 | 0 | 24 | 51.375 | 101 |
AayushDangol123/rasa-chatbot | 515,396,081,820 | d484feb6c4927ebe475a2e59c4e4b45389cfd59d | ad0ffa148f9379878efee7574b194f8e4a9a12bf | /actions.py | 11e1e45e2ba4ab2f5f87478a5ce6e1df264e0415 | []
| no_license | https://github.com/AayushDangol123/rasa-chatbot | a127b5c1a6e11668a5989c0a88b70a8b7a8618f1 | 02c905150e7a0142b6c02539db97f4d05e9cfeea | refs/heads/main | 2023-03-12T15:34:07.685193 | 2021-03-05T07:23:32 | 2021-03-05T07:23:32 | 344,373,849 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/custom-actions
# This is a simple example for a custom action which utters "Hello World!"
# from typing import Any, Text, Dict, List
#
# from rasa_sdk import Action, Tracker
# from rasa_sdk.executor import CollectingDispatcher
#
#
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
#
# dispatcher.utter_message(text="Hello World!")
#
# return []
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
import csv
class actionfindengr(Action):
def name(self) -> Text:
return "action_findengr"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]
) -> List[Dict[Text, Any]]:
# get the location slot
location = tracker.get_slot('location')
# read the CSV file
with open('data/Engineering colllege.csv','r',encoding = "utf-8") as file:
reader = csv.DictReader(file)
# get a list of universities in the desired location
output = [row for row in reader if row['location'] == location]
if output:
reply = f"This is a list of universities in {location}:"
reply += "\n- " + "\n- ".join([item['College'] for item in output])
# utter the message
dispatcher.utter_message(reply)
else: # the list is empty
dispatcher.utter_message(f"I could not find universities in {location}")
| UTF-8 | Python | false | false | 1,881 | py | 5 | actions.py | 1 | 0.629984 | 0.629452 | 0 | 56 | 32.589286 | 84 |
yicheng-li/yc | 5,463,198,444,978 | 5e3acc44d5dda53a40d768df6ed662a84acde3e6 | d20a2832be223b98a869f97813f5e22f6889abc5 | /new_hg_new/train_parsing_vgg.py | 80bb2d18aea5448597ed2edbf9a38dd05ae57caf | []
| no_license | https://github.com/yicheng-li/yc | c66e9743ad446bc0addba4ea2cc0f637a8137049 | a45092e4d13f6e50d9bd8b66383c5a595aa37b61 | refs/heads/master | 2018-02-07T09:37:54.435081 | 2017-08-08T02:49:34 | 2017-08-08T02:49:34 | 95,753,149 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
import argparse
from datetime import datetime
import os
import sys
import time
import tensorflow as tf
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from utils import *
from vgg16 import vggnet
### parameters setting
RANDOM_SEED = 1234
N_CLASSES = 20
INPUT_SIZE = (321, 321)
BATCH_SIZE = 1
DECAY_STEPS = 7000 # 30462 trainning img
MIRROR = False
SCALE = False
SAVE_NUM_IMAGES = 1
LEARNING_RATE = 1e-5
MOMENTUM = 0.9
NUM_STEPS = 20001
DECAY_RATE = 0.9
SAVE_PRED_EVERY = 500
DATA_DIR = './datasets/human'
LIST_PATH = './datasets/human/list/train.txt'
RESTORE_FROM = './model' #'./model/'
SNAPSHOT_DIR = './checkpoint'
LOG_DIR = './logs'
def main():
tf.set_random_seed(RANDOM_SEED)
# Create queue coordinator.
coord = tf.train.Coordinator()
# Load reader.
with tf.name_scope("create_inputs"):
reader = ImageReader(DATA_DIR, LIST_PATH, INPUT_SIZE, SCALE, MIRROR, coord)
image_batch, label_batch = reader.dequeue(BATCH_SIZE)
# Create Networks
raw_output = vggnet(image_batch)
# Predictions.
raw_prediction = tf.reshape(raw_output, [-1, N_CLASSES])
label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), one_hot=False) # [batch_size, h, w]
raw_gt = tf.reshape(label_proc, [-1,])
indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, N_CLASSES - 1)), 1)
gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
prediction = tf.gather(raw_prediction, indices)
# Pixel-wise softmax loss.
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt))
# Loss summary
loss_summary = tf.summary.scalar("loss_softmax", loss)
# Processed predictions: for visualisation.
raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])
raw_output_up = tf.argmax(raw_output_up, dimension=3)
pred = tf.expand_dims(raw_output_up, dim=3)
# Image summary.
images_summary = tf.py_func(inv_preprocess, [image_batch, SAVE_NUM_IMAGES], tf.uint8)
labels_summary = tf.py_func(decode_labels, [label_batch, SAVE_NUM_IMAGES], tf.uint8)
preds_summary = tf.py_func(decode_labels, [pred, SAVE_NUM_IMAGES], tf.uint8)
total_summary = tf.summary.image('images', tf.concat([images_summary, labels_summary, preds_summary], 2),
max_outputs=SAVE_NUM_IMAGES) # Concatenate row-wise.
summary_writer = tf.summary.FileWriter(LOG_DIR, graph=tf.get_default_graph())
# Define loss and optimisation parameters.
base_lr = tf.constant(LEARNING_RATE)
step_ph = tf.placeholder(dtype=tf.float32, shape=())
learning_rate = tf.train.exponential_decay(base_lr, step_ph, DECAY_STEPS, DECAY_RATE, staircase=True)
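# With staircase=True the schedule above is equivalent to:
#   lr = LEARNING_RATE * DECAY_RATE ** floor(step / DECAY_STEPS)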
optim = tf.train.MomentumOptimizer(learning_rate, MOMENTUM).minimize(loss)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
# Saver for storing checkpoints of the model.
# Restore variables
all_trainable = tf.trainable_variables()
all_saver_var = tf.global_variables()
saver = tf.train.Saver(var_list=all_saver_var, max_to_keep=10)
# Load variables if the checkpoint is provided.
if RESTORE_FROM is not None:
loader = tf.train.Saver(var_list=all_trainable)
load(loader, sess, RESTORE_FROM)
# Start queue threads.
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
# Iterate over training steps.
for step in range(NUM_STEPS):
start_time = time.time()
feed_dict = { step_ph : step }
loss_value = 0
# Apply gradients.
if step % SAVE_PRED_EVERY == 0:
summary, loss_value, lr, _ = sess.run([total_summary, loss, learning_rate, optim], feed_dict=feed_dict)
summary_writer.add_summary(summary, step)
save(saver, sess, SNAPSHOT_DIR, step)
else:
summary_str, loss_value, lr, _ = sess.run([loss_summary, loss, learning_rate, optim], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
duration = time.time() - start_time
print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step) \t learning_rate = {:f}'.format(step, loss_value, duration, lr))
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 4,514 | py | 29 | train_parsing_vgg.py | 15 | 0.659504 | 0.645326 | 0 | 124 | 35.403226 | 125 |
zx490336534/Spider_Basics | 8,847,632,635,216 | 340b873584086a2097a680291f56a87b91dd8e6b | 15c07658420771267db5f56b295064beaa79db46 | /06-requests/正式课/requests_test.py | 277c0251274ce070d7defaadbdd55ce11a2b67af | []
| no_license | https://github.com/zx490336534/Spider_Basics | 4dab606d7b72fa3eb1eb0b5467f12d523b029c93 | aa3f723b8887c33af73dbf2bbad70d059a584201 | refs/heads/master | 2021-05-12T09:41:08.877423 | 2018-02-05T12:24:36 | 2018-02-05T12:24:36 | 117,328,894 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import urllib3
from requests.cookies import RequestsCookieJar
urllib3.disable_warnings()
# Basic usage
# url = 'https://httpbin.org'
# r = requests.get(url)
# print(r.text)
# Passing GET parameters
# payload = {'key1':'value1','key2':'value2'}
# r = requests.get('https://httpbin.org/get',params=payload)
# print(r.text)
# Equivalent to:
# r = requests.get('https://httpbin.org/get?key1=value1&key2=value2')
# print(r.text)
#
# data = {
# "key1": "value1",
# "key2": "value2"
# }
# headers = {
# "Connection": "keep-alive",
# "Referer": "httpbin.org",
# "User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.83 Safari/535.11",
# }
# url = 'https://httpbin.org/post'
# r = requests.post(url,data=data,headers=headers)
# print(r.text)
# print(r.json())
'''
Converting a dict (JSON object) to a string:
data = {'k': 'v'}
json.dumps(data)
Converting a JSON string back to a dict:
s = '{"k": "v"}'
json.loads(s)
'''
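# A concrete version of the sketch above (illustrative; kept commented out like the other demos):
# import json
# print(json.dumps({'k': 'v'}))    # -> '{"k": "v"}'
# print(json.loads('{"k": "v"}'))  # -> {'k': 'v'}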
# Controlling automatic (302) redirects: set allow_redirects=False to disable them
# r = requests.get('http://github.com',allow_redirects=True,verify=False)
# print(r.status_code)
# Timeout: raises an exception if the request takes longer than the given time
# r = requests.get('http://github.com',timeout = 0.001,verify=False)
# print(r.status_code)
# Proxies
proxies = {'http':'127.0.0.1:8080','https':'127.0.0.1:8080'}
r = requests.get('http://httpbin.org/',proxies=proxies)
# print(r.status_code)
# Status code
# r = requests.get('http://httpbin.org/')
# if r.status_code == 200:
#     print('success')
# else:
#     print('failure')
STATUS_CODES_OK = 101
STATUS_CODES_ERROR = 102
status = 0
if status == STATUS_CODES_OK:
pass
elif status == STATUS_CODES_ERROR:
pass
else:
pass
# Using a session
s = requests.session()
r = s.get('http://httpbin.org')
# print(r.status_code)
#cookie
'''
Cookies follow this scoping rule:
a subdomain can access its parent domain's cookies, but a parent domain cannot access a subdomain's.
test.httpbin.org can read every cookie of httpbin.org, but not the other way around; sibling domains cannot share cookies either.
Likewise, a subpath can access cookies set on a parent path, while a parent path cannot access cookies set on a subpath.
'''
jar = RequestsCookieJar()
jar.set('tasty_cookie','yum',domain='httpbin.org',path='/cookies')
jar.set('gross_cookie','blech',domain='httpbin.org',path='elsewhere')
jar.set('root_cookie','root',path='/')
jar.set('default_cookie','default')
url = 'http://httpbin.org/cookies'
r = requests.get(url,cookies=jar)
# print(r.text)
# Encoding
r = requests.get('http://www.baidu.com')
print(r.encoding)
# r.content returns bytes (b'...')
content = r.content
print(content)
print(content.decode('utf-8'))
# r.text returns a str
text = r.text
print(text)
print(text.encode('raw_unicode_escape').decode('utf-8'))
print(text.encode('iso-8859-1').decode('utf-8'))
print(text.encode(r.encoding).decode('utf-8')) # r.encoding may be None
'''
From the select_proxy helper in the requests.utils module, proxy keys are looked up in this order.
For example, for the URL http://httpbin.org/ the candidates are tried as:
1. 'http://httpbin.org'  (note: no trailing /)
2. 'http'
3. 'all://httpbin.org'
4. 'all'
''' | UTF-8 | Python | false | false | 2,987 | py | 39 | requests_test.py | 29 | 0.663261 | 0.629588 | 0 | 131 | 19.183206 | 124 |
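# Sketch of that lookup order using the internal helper (select_proxy is not public API,
# so its name/behaviour may vary across requests versions; kept commented out):
# from requests.utils import select_proxy
# demo_proxies = {
#     'http://httpbin.org': 'http://127.0.0.1:1111',
#     'http': 'http://127.0.0.1:2222',
#     'all://httpbin.org': 'http://127.0.0.1:3333',
#     'all': 'http://127.0.0.1:4444',
# }
# print(select_proxy('http://httpbin.org/', demo_proxies))  # the most specific key (1111) wins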
epan626/python_exam | 4,355,096,844,449 | b7d3a99158346fe31ae01b6771340e3c5e83bf96 | f10d59dfbb6b8c316b83493ea41f440dbc43b9e1 | /python_exam/apps/products/migrations/0004_auto_20161216_2011.py | 6cae56cd97ce63be63afb2248d073eeda6ba0576 | []
| no_license | https://github.com/epan626/python_exam | ca929e6984a07fceaa852d56cd833c42de61282d | 7e64c378402673cead7c07333dbaced749b44230 | refs/heads/master | 2021-01-12T08:47:10.636861 | 2016-12-16T22:13:37 | 2016-12-16T22:13:37 | 76,689,026 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-16 20:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_product_user'),
]
operations = [
migrations.AlterField(
model_name='product',
name='user',
field=models.ManyToManyField(default=1, related_name='user_products', to='regandlogin.User'),
),
]
| UTF-8 | Python | false | false | 500 | py | 10 | 0004_auto_20161216_2011.py | 5 | 0.612 | 0.568 | 0 | 20 | 24 | 105 |
LuisPerez64/Prac | 15,066,745,309,594 | 0aef95ec83b6557730f1209e040987b26a2f30f1 | 0734ea587e68ff5fc5068514ba63492a0cd0cbc7 | /python/implementations/data_structures/core/default_dict.py | d39a2ab2cfc8dcaa4db91bb6154218524bf69a5b | []
| no_license | https://github.com/LuisPerez64/Prac | 735756a3bfa7b33f2fcba5bdb92abb58d3a71dc1 | ef2c7fcb65c574317c83c3f632e066b96222b5f3 | refs/heads/master | 2021-01-18T03:43:08.566645 | 2020-11-12T04:47:16 | 2020-11-12T04:47:16 | 44,716,313 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Base implementation of the defaultdict object extending the base dict class.
"""
__all__ = ["DefaultDict"]
from typing import Callable
class DefaultDict(dict):
def __init__(self, missing_func: Callable, **kwargs):
self.missing_func = missing_func
super(DefaultDict, self).__init__(**kwargs)
def __missing__(self, key):
res = self.missing_func(key)
super(DefaultDict, self).__setitem__(key, res)
return res # super(DefaultDict, self).__getitem__(key)
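
# Minimal usage sketch (added for illustration; note that, unlike collections.defaultdict,
# the factory here receives the missing key itself):
if __name__ == "__main__":
    lengths = DefaultDict(len)
    assert lengths["hello"] == 5   # computed via len("hello") on first access
    assert "hello" in lengths      # __missing__ cached the computed value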
| UTF-8 | Python | false | false | 508 | py | 493 | default_dict.py | 415 | 0.639764 | 0.639764 | 0 | 18 | 27.222222 | 76 |
hhhernandez/pym | 9,783,935,533,581 | befa3c74a96bf3bce451039b1f59eecb35f1799b | 69fcb7c7eb2485f2ffd0f2a2d7858d97b4fe4275 | /python/PYM.py | 1596897f31b5420c06111488cd3c75f8e1bd970f | []
| no_license | https://github.com/hhhernandez/pym | 3fdf8f57f4ac7cc57cc15ca77ae2cef2e29eceae | 5eccf449c7548175dbc2eb34b6dfa722573f00f3 | refs/heads/master | 2020-03-24T05:31:06.576519 | 2017-10-08T10:36:05 | 2017-10-08T10:36:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 17:03:41 2017
@author: valle
"""

import csv
import glob
import os

import cv2
import numpy as np
def PYM_image_transformation(original, filename):
"This function performs the PYM calculation and returns a new frame"
h, w = original[:,:,0].shape # get original image shape
pym = np.zeros((h, w),np.int) # blank b/w image for storing pym image
red = np.zeros((h, w),np.int) # blank array for red
blue = np.zeros((h, w),np.int) # blank array for blue
# Specific channels
red = (original[:,:,2]).astype('float') # reading red channel from original image (NIR)
blue = (original[:,:,0]).astype('float') # reading blue channel from original image (blue)
# PYM calculation
max_sc = np.amax(red - blue/2)
pym = ((red - blue/2)*255/max_sc).astype('uint8') # computing new channel
pym[red - blue/2 < 0] = 0 # setting limit
# False color image
False_color_image = np.zeros((h, w,3),np.uint8) # make a blank RGB image
False_color_image[:,:,1] = pym
False_color_image[:,:,2] = 255 - pym
f_image = "FALSE_COLOR_" + filename
f_dest = "FALSE_COLOR/" + f_image
cv2.imwrite(f_dest, False_color_image)
return pym # return the image
def PYM_leaf_area_estimation(filename, include_holes=True):
image_source = cv2.imread(filename) # import source image
# Image transformation and storage
t = PYM_image_transformation(image_source, filename) # Transform image with PYM_image_transformation function
r_image = "NEW_CHANNEL_" + filename # Filename of the new image (visual checking)
r_dest = "NEW_CHANNEL/" + r_image # Folder
cv2.imwrite(r_dest, t) # saving image
# Image analysis
ret,thresh1 = cv2.threshold(t, 0, 255, cv2.THRESH_OTSU) # OTSU's thresholding
kernel_open = np.ones((6,6),np.uint8) # large kernel
kernel_mid = np.ones((4,4),np.uint8) # medium kernel
kernel_close = np.ones((2,2),np.uint8) # small kernel
kernel_veryclose = np.ones((1,1),np.uint8) # very small kernel
erosion = cv2.erode(thresh1, kernel_veryclose,iterations = 1) # edge erosion
opening = cv2.morphologyEx(erosion, cv2.MORPH_OPEN, kernel_open) # removing noise around the plant
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel_mid) # removing noise inside the plant
contours, hierarchy = cv2.findContours(closing,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) # finding plant contours
# aa, contours, hierarchy = cv2.findContours(closing,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) # with an older version of opencv
if include_holes==1: # Counting all pixels inside the largest area
areas = [] # list
for contour in contours:
ar = cv2.contourArea(contour)
areas.append(ar)
sorted_area = sorted(areas, key=int, reverse = True)
leaf_area = sorted_area[0] # largest area, plant area by definition
leaf_area_index = areas.index(leaf_area) # finding area index
cnt = contours[leaf_area_index] # plant contours, with holes included
cv2.drawContours(closing, [cnt], 0,(255,0,0),-1) # drawing contours with holes included
if include_holes==0: # Counting all pixels detected
cv2.drawContours(closing, contours, -1, (255, 255, 255), -1) # drawing contours without including holes
leaf_area = (closing > 127).sum() # counting plant pixels
# Image storage
image_finale = "OUTPUT_" + filename # Filename of the output image
dest_finale = "OUTPUT/" + image_finale # Folder
cv2.imwrite(dest_finale, closing) # Saving image
return leaf_area # Plant area is returned as output of the function
def PYM_folder(dirname, include_holes):
os.chdir(dirname) # updating current directory
try:
os.mkdir("FALSE_COLOR") # creating a new folder to save FALSE_COLOR images
os.mkdir("NEW_CHANNEL") # creating a new folder to save NEW_CHANNEL images
os.mkdir("OUTPUT") # creating a new folder to save OUTPUT images
except Exception:
pass
fname = "out.csv"
file = open(fname, "wb")
writer = csv.writer(file)
writer.writerow(('plant_ID', 'leaf_area_pixel'))
i=0
types = ('*.png', '*.jpg') # checked formats
files_g = []
for files in types:
files_g.extend(glob.glob(files))
for filename in files_g: # loop for found files
i+=1
fi = cv2.imread(filename) # reading image
plant_id = filename[:-4] # picture's name is also plant ID, after removal of 4 last characters (".jpg" or ".png")
try:
leaf_area_pixel = PYM_leaf_area_estimation(filename, include_holes) # using the previously defined function
writer.writerow((plant_id, leaf_area_pixel)) # storing computed leaf_area
except IndexError:
pass
file.close()
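
# Example entry point (illustrative only; the folder path below is a placeholder):
# if __name__ == "__main__":
#     PYM_folder("path/to/plant_images", include_holes=True)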
| UTF-8 | Python | false | false | 4,902 | py | 2 | PYM.py | 2 | 0.639331 | 0.618931 | 0 | 117 | 40.769231 | 129 |