file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---
symbolpattern81.py
|
height = int(input())
for i in range(1,height+1):
for j in range(1,height+1):
if(i == 1 or j == 1 or i == height or j == height or i == j or i == height-j+1):
print("*",end=" ")
|
else:
print(end=" ")
print()
# Sample Input :- 7
# Output :-
# * * * * * * *
# * * * *
# * * * *
# * * *
# * * * *
# * * * *
# * * * * * * *
| |
moment.js
|
import { hooks as moment, setHookCallback } from './lib/utils/hooks';
moment.version = '2.10.2';
import {
min,
max,
isMoment,
momentPrototype as fn,
createUTC as utc,
createUnix as unix,
createLocal as local,
createInvalid as invalid,
createInZone as parseZone
} from './lib/moment/moment';
import {
defineLocale,
getSetGlobalLocale as locale,
getLocale as localeData,
listMonths as months,
listMonthsShort as monthsShort,
listWeekdays as weekdays,
listWeekdaysMin as weekdaysMin,
listWeekdaysShort as weekdaysShort
} from './lib/locale/locale';
import {
isDuration,
createDuration as duration,
getSetRelativeTimeThreshold as relativeTimeThreshold
} from './lib/duration/duration';
import { normalizeUnits } from './lib/units/units';
import isDate from './lib/utils/is-date';
setHookCallback(local);
moment.fn = fn;
moment.min = min;
moment.max = max;
moment.utc = utc;
moment.unix = unix;
|
moment.locale = locale;
moment.invalid = invalid;
moment.duration = duration;
moment.isMoment = isMoment;
moment.weekdays = weekdays;
moment.parseZone = parseZone;
moment.localeData = localeData;
moment.isDuration = isDuration;
moment.monthsShort = monthsShort;
moment.weekdaysMin = weekdaysMin;
moment.defineLocale = defineLocale;
moment.weekdaysShort = weekdaysShort;
moment.normalizeUnits = normalizeUnits;
moment.relativeTimeThreshold = relativeTimeThreshold;
export default moment;
|
moment.months = months;
moment.isDate = isDate;
|
integration_test.go
|
package keeper_test
import (
abci "github.com/tendermint/tendermint/abci/types"
sdk "github.com/cosmos/cosmos-sdk/types"
simapp "github.com/irisnet/modules/incubator/nft/app"
"github.com/irisnet/modules/incubator/nft/internal/types"
)
// nolint: deadcode unused
var (
denom = "test-denom"
denom2 = "test-denom2"
denom3 = "test-denom3"
id = "1"
id2 = "2"
id3 = "3"
address = types.CreateTestAddrs(1)[0]
address2 = types.CreateTestAddrs(2)[1]
address3 = types.CreateTestAddrs(3)[2]
tokenURI = "https://google.com/token-1.json"
tokenURI2 = "https://google.com/token-2.json"
)
func createTestApp(isCheckTx bool) (*simapp.SimApp, sdk.Context)
|
{
app := simapp.Setup(isCheckTx)
ctx := app.BaseApp.NewContext(isCheckTx, abci.Header{})
return app, ctx
}
|
|
bigip_pool_member.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# Copyright (c) 2013 Matt Hite <mhite@hotmail.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_pool_member
short_description: Manages F5 BIG-IP LTM pool members
description:
- Manages F5 BIG-IP LTM pool members via iControl SOAP API.
version_added: 1.4
options:
name:
description:
- Name of the node to create, or re-use, when creating a new pool member.
- This parameter is optional and, if not specified, a node name will be
created automatically from either the specified C(address) or C(fqdn).
version_added: 2.6
state:
description:
- Pool member state.
required: True
default: present
choices:
- present
- absent
- enabled
- disabled
- forced_offline
pool:
description:
- Pool name. This pool must exist.
required: True
partition:
description:
- Device partition to manage resources on.
default: Common
address:
description:
- IP address of the pool member. This can be either IPv4 or IPv6. When creating a
new pool member, one of either C(address) or C(fqdn) must be provided. This
parameter cannot be updated after it is set.
aliases:
- ip
- host
version_added: 2.2
fqdn:
description:
- FQDN name of the pool member. This can be any name that is a valid RFC 1123 DNS
name. Therefore, the only characters that can be used are "A" to "Z",
"a" to "z", "0" to "9", the hyphen ("-") and the period (".").
- FQDN names must include at least one period, delineating the host from
the domain, e.g. C(host.domain).
- FQDN names must end with a letter or a number.
- When creating a new pool member, one of either C(address) or C(fqdn) must be
provided. This parameter cannot be updated after it is set.
aliases:
- hostname
version_added: 2.6
port:
description:
- Pool member port.
- This value cannot be changed after it has been set.
required: True
connection_limit:
description:
- Pool member connection limit. Setting this to 0 disables the limit.
description:
description:
- Pool member description.
rate_limit:
description:
- Pool member rate limit (connections-per-second). Setting this to 0
disables the limit.
ratio:
description:
- Pool member ratio weight. Valid values range from 1 through 100.
New pool members -- unless overridden with this value -- default
to 1.
preserve_node:
description:
- When state is C(absent), the module attempts to remove the node that the pool
member references.
- The node will not be removed if it is still referenced by other pool
members. If this happens, the module will not raise an error.
- Setting this to C(yes) disables this behavior.
type: bool
version_added: 2.1
priority_group:
description:
- Specifies a number representing the priority group for the pool member.
- When adding a new member, the default is 0, meaning that the member has no priority.
- To specify a priority, you must activate priority group usage when you
create a new pool or when adding or removing pool members. When activated,
the system load balances traffic according to the priority group number
assigned to the pool member.
- The higher the number, the higher the priority, so a member with a priority
of 3 has higher priority than a member with a priority of 1.
version_added: 2.5
fqdn_auto_populate:
description:
- Specifies whether the system automatically creates ephemeral nodes using
the IP addresses returned by the resolution of a DNS query for a node
defined by an FQDN.
- When C(enabled), the system generates an ephemeral node for each IP address
returned in response to a DNS query for the FQDN of the node. Additionally,
when a DNS response indicates the IP address of an ephemeral node no longer
exists, the system deletes the ephemeral node.
- When C(disabled), the system resolves a DNS query for the FQDN of the node
with the single IP address associated with the FQDN.
- When creating a new pool member, the default for this parameter is C(yes).
- This parameter is ignored when C(reuse_nodes) is C(yes).
type: bool
version_added: 2.6
reuse_nodes:
description:
- Reuses node definitions if requested.
default: yes
type: bool
version_added: 2.6
session_state:
description:
- Set new session availability status for pool member.
- This parameter is deprecated and will be removed in Ansible 2.7. Use C(state)
C(enabled) or C(disabled).
version_added: 2.0
choices:
- enabled
- disabled
monitor_state:
description:
- Set monitor availability status for pool member.
- This parameter is deprecated and will be removed in Ansible 2.7. Use C(state)
C(enabled) or C(disabled).
version_added: 2.0
choices:
- enabled
- disabled
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add pool member
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: present
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
delegate_to: localhost
- name: Modify pool member ratio and description
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: present
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
ratio: 1
description: nginx server
delegate_to: localhost
- name: Remove pool member from pool
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: absent
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
delegate_to: localhost
- name: Force pool member offline
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: forced_offline
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
delegate_to: localhost
- name: Create members with priority groups
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
pool: my-pool
partition: Common
host: "{{ item.address }}"
name: "{{ item.name }}"
priority_group: "{{ item.priority_group }}"
port: 80
delegate_to: localhost
loop:
- host: 1.1.1.1
name: web1
priority_group: 4
- host: 2.2.2.2
name: web2
priority_group: 3
- host: 3.3.3.3
name: web3
priority_group: 2
- host: 4.4.4.4
name: web4
priority_group: 1
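# The following task is an illustrative sketch only (hostnames, pool name, and
# credentials are placeholders); it shows the fqdn and fqdn_auto_populate
# options documented above.
- name: Add FQDN pool member with DNS auto-population
  bigip_pool_member:
    server: lb.mydomain.com
    user: admin
    password: secret
    state: present
    pool: my-pool
    partition: Common
    fqdn: web.mydomain.com
    fqdn_auto_populate: yes
    port: 80
  delegate_to: localhost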
'''
RETURN = '''
rate_limit:
description: The new rate limit, in connections per second, of the pool member.
returned: changed
type: int
sample: 100
connection_limit:
description: The new connection limit of the pool member
returned: changed
type: int
sample: 1000
description:
description: The new description of pool member.
returned: changed
type: string
sample: My pool member
ratio:
description: The new pool member ratio weight.
returned: changed
type: int
sample: 50
priority_group:
description: The new priority group.
returned: changed
type: int
sample: 3
fqdn_auto_populate:
description: Whether FQDN auto population was set on the member or not.
returned: changed
type: bool
sample: True
fqdn:
description: The FQDN of the pool member.
returned: changed
type: string
sample: foo.bar.com
address:
description: The address of the pool member.
returned: changed
type: string
sample: 1.2.3.4
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.network.common.utils import validate_ip_address
from ansible.module_utils.network.common.utils import validate_ip_v6_address
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import is_valid_hostname
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import is_valid_ip
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import is_valid_hostname
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import is_valid_ip
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'rateLimit': 'rate_limit',
'connectionLimit': 'connection_limit',
'priorityGroup': 'priority_group',
}
api_attributes = [
'rateLimit', 'connectionLimit', 'description', 'ratio', 'priorityGroup',
'address', 'fqdn', 'session', 'state'
]
returnables = [
'rate_limit', 'connection_limit', 'description', 'ratio', 'priority_group',
'fqdn_auto_populate', 'session', 'state', 'fqdn', 'address'
]
updatables = [
'rate_limit', 'connection_limit', 'description', 'ratio', 'priority_group',
'fqdn_auto_populate', 'state'
]
class ModuleParameters(Parameters):
@property
def full_name(self):
delimiter = ':'
if validate_ip_v6_address(self.full_name_dict['name']):
delimiter = '.'
return '{0}{1}{2}'.format(self.full_name_dict['name'], delimiter, self.port)
@property
def full_name_dict(self):
if self._values['name'] is None:
name = self._values['address'] if self._values['address'] else self._values['fqdn']
else:
name = self._values['name']
return dict(
name=name,
port=self.port
)
@property
def node_name(self):
return self.full_name_dict['name']
@property
def fqdn_name(self):
return self._values['fqdn']
@property
def fqdn(self):
result = {}
if self.fqdn_auto_populate:
result['autopopulate'] = 'enabled'
else:
result['autopopulate'] = 'disabled'
if self._values['fqdn'] is None:
return result
if not is_valid_hostname(self._values['fqdn']):
raise F5ModuleError(
"The specified 'fqdn' is not a valid hostname."
)
result['tmName'] = self._values['fqdn']
return result
@property
def pool(self):
return fq_name(self.want.partition, self._values['pool'])
@property
def port(self):
if 0 > int(self._values['port']) or int(self._values['port']) > 65535:
raise F5ModuleError(
"Valid ports must be in range 0 - 65535"
)
return int(self._values['port'])
@property
def state(self):
# TODO(Remove all of this state craziness in 2.7)
if self.session_state is not None or self.monitor_state is not None:
if self._values['state'] in ['enabled', 'disabled', 'forced_offline']:
self._values['__warnings'].append([{
'msg': "'session_state' is deprecated and will be ignored in favor of 'state'.",
'version': '2.7'
}])
return self._values['state']
else:
if self.session_state is not None:
self._values['__warnings'].append([{
'msg': "'session_state' is deprecated and will be removed in the future. Use 'state'.",
'version': '2.7'
}])
elif self.monitor_state is not None:
self._values['__warnings'].append([{
'msg': "'monitor_state' is deprecated and will be removed in the future. Use 'state'.",
'version': '2.7'
}])
if self.session_state == 'enabled' and self.monitor_state == 'enabled':
return 'enabled'
elif self.session_state == 'disabled' and self.monitor_state == 'enabled':
return 'disabled'
else:
return 'forced_offline'
return self._values['state']
@property
def address(self):
if self._values['address'] is None:
return None
elif self._values['address'] == 'any6':
return 'any6'
if is_valid_ip(self._values['address']):
return self._values['address']
raise F5ModuleError(
"The specified 'address' value is not a valid IP address."
)
class ApiParameters(Parameters):
@property
def allow(self):
if self._values['allow'] is None:
return ''
if self._values['allow'][0] == 'All':
return 'all'
allow = self._values['allow']
result = list(set([str(x) for x in allow]))
result = sorted(result)
return result
@property
def rate_limit(self):
if self._values['rate_limit'] is None:
return None
if self._values['rate_limit'] == 'disabled':
return 0
return int(self._values['rate_limit'])
@property
def state(self):
if self._values['state'] in ['user-up', 'unchecked', 'fqdn-up-no-addr'] and self._values['session'] in ['user-enabled']:
return 'present'
elif self._values['state'] in ['down', 'up'] and self._values['session'] == 'monitor-enabled':
return 'present'
elif self._values['state'] in ['user-down'] and self._values['session'] in ['user-disabled']:
return 'forced_offline'
else:
return 'disabled'
class NodeApiParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def ssl_cipher_suite(self):
default = ':'.join(sorted(Parameters._ciphers.split(':')))
if self._values['ssl_cipher_suite'] == default:
return 'default'
else:
return self._values['ssl_cipher_suite']
@property
def fqdn_auto_populate(self):
if self._values['fqdn'] is None:
return None
if 'autopopulate' in self._values['fqdn']:
if self._values['fqdn']['autopopulate'] == 'enabled':
return True
return False
@property
def fqdn(self):
if self._values['fqdn'] is None:
return None
if 'tmName' in self._values['fqdn']:
return self._values['fqdn']['tmName']
@property
def state(self):
if self._values['state'] in ['user-up', 'unchecked', 'fqdn-up-no-addr'] and self._values['session'] in ['user-enabled']:
return 'present'
elif self._values['state'] in ['down', 'up'] and self._values['session'] == 'monitor-enabled':
return 'present'
elif self._values['state'] in ['user-down'] and self._values['session'] in ['user-disabled']:
return 'forced_offline'
else:
return 'disabled'
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def state(self):
if self.want.state == self.have.state:
return None
if self.want.state == 'forced_offline':
return {
'state': 'user-down',
'session': 'user-disabled'
}
elif self.want.state == 'disabled':
return {
'state': 'user-up',
'session': 'user-disabled'
}
elif self.want.state in ['present', 'enabled']:
return {
'state': 'user-up',
'session': 'user-enabled'
}
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ['present', 'enabled', 'disabled', 'forced_offline']:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
try:
pool = self.client.api.tm.ltm.pools.pool.load(
name=self.want.pool,
partition=self.want.partition
)
except Exception:
raise F5ModuleError('The specified pool does not exist')
result = pool.members_s.members.exists(
name=self.want.full_name,
partition=self.want.partition
)
return result
def node_exists(self):
resource = self.client.api.tm.ltm.nodes.node.exists(
name=self.want.node_name,
partition=self.want.partition
)
return resource
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if not self.want.preserve_node:
self.remove_node_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def _set_host_by_name(self):
if is_valid_ip(self.want.name):
self.want.update({
'fqdn': None,
'address': self.want.name
})
else:
if not is_valid_hostname(self.want.name):
raise F5ModuleError(
"'name' is neither a valid IP address or FQDN name."
)
self.want.update({
'fqdn': self.want.name,
'address': None
})
def _update_api_state_attributes(self):
if self.want.state == 'forced_offline':
self.want.update({
'state': 'user-down',
'session': 'user-disabled',
# TODO(Remove in 2.7)
'session_state': None,
'monitor_state': None
})
elif self.want.state == 'disabled':
self.want.update({
'state': 'user-up',
'session': 'user-disabled',
# TODO(Remove in 2.7)
'session_state': None,
'monitor_state': None
})
elif self.want.state in ['present', 'enabled']:
self.want.update({
'state': 'user-up',
'session': 'user-enabled',
# TODO(Remove in 2.7)
'session_state': None,
'monitor_state': None
})
def _update_address_with_existing_nodes(self):
try:
have = self.read_current_node_from_device(self.want.node_name)
if self.want.fqdn_auto_populate and self.want.reuse_nodes:
self.module.warn("'fqdn_auto_populate' is discarded in favor of the re-used node's auto-populate setting.")
self.want.update({
'fqdn_auto_populate': True if have.fqdn['autopopulate'] == 'enabled' else False
})
if 'tmName' in have.fqdn:
self.want.update({
'fqdn': have.fqdn['tmName'],
'address': 'any6'
})
else:
self.want.update({
'address': have.address
})
except Exception:
return None
def create(self):
if self.want.reuse_nodes:
self._update_address_with_existing_nodes()
if self.want.name and not any(x for x in [self.want.address, self.want.fqdn_name]):
self._set_host_by_name()
self._update_api_state_attributes()
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
pool = self.client.api.tm.ltm.pools.pool.load(
name=self.want.pool,
partition=self.want.partition
)
pool.members_s.members.create(
name=self.want.full_name,
partition=self.want.partition,
**params
)
def update_on_device(self):
params = self.changes.api_params()
pool = self.client.api.tm.ltm.pools.pool.load(
name=self.want.pool,
partition=self.want.partition
)
resource = pool.members_s.members.load(
name=self.want.full_name,
partition=self.want.partition
)
resource.modify(**params)
def absent(self):
if self.exists():
return self.remove()
elif not self.want.preserve_node and self.node_exists():
return self.remove_node_from_device()
return False
def remove_from_device(self):
pool = self.client.api.tm.ltm.pools.pool.load(
name=self.want.pool,
partition=self.want.partition
)
resource = pool.members_s.members.load(
name=self.want.full_name,
partition=self.want.partition
)
if resource:
resource.delete()
def remove_node_from_device(self):
resource = self.client.api.tm.ltm.nodes.node.load(
name=self.want.node_name,
partition=self.want.partition
)
if resource:
resource.delete()
def read_current_from_device(self):
pool = self.client.api.tm.ltm.pools.pool.load(
name=self.want.pool,
partition=self.want.partition
)
resource = pool.members_s.members.load(
name=self.want.full_name,
partition=self.want.partition
)
return ApiParameters(params=resource.attrs)
def read_current_node_from_device(self, node):
resource = self.client.api.tm.ltm.nodes.node.load(
name=node,
partition=self.want.partition
)
return NodeApiParameters(params=resource.attrs)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
pool=dict(required=True),
address=dict(aliases=['host', 'ip']),
fqdn=dict(
aliases=['hostname']
),
name=dict(),
port=dict(type='int', required=True),
connection_limit=dict(type='int'),
description=dict(),
rate_limit=dict(type='int'),
ratio=dict(type='int'),
preserve_node=dict(type='bool'),
priority_group=dict(type='int'),
state=dict(
default='present',
choices=['absent', 'present', 'enabled', 'disabled', 'forced_offline']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
fqdn_auto_populate=dict(type='bool'),
reuse_nodes=dict(type='bool', default=True),
# Deprecated params
# TODO(Remove in 2.7)
session_state=dict(
choices=['enabled', 'disabled'],
removed_in_version=2.7,
),
monitor_state=dict(
choices=['enabled', 'disabled'],
removed_in_version=2.7,
),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['address', 'fqdn']
]
self.required_one_of = [
['name', 'address', 'fqdn'],
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
|
if __name__ == '__main__':
main()
|
module.fail_json(msg=str(ex))
|
create_hosted_tokenization_request.py
|
# -*- coding: utf-8 -*-
#
# This class was auto-generated.
#
from onlinepayments.sdk.data_object import DataObject
class CreateHostedTokenizationRequest(DataObject):
__ask_consumer_consent = None
__locale = None
__tokens = None
__variant = None
@property
def ask_consumer_consent(self) -> bool:
"""
| Indicate if the tokenization form should contain a prompt asking the user to give consent for storing their information for future payments.
| If this parameter is false, you should ask the user yourself and provide the answer when submitting the Tokenizer in your javascript code.
Type: bool
"""
return self.__ask_consumer_consent
|
@ask_consumer_consent.setter
def ask_consumer_consent(self, value: bool):
self.__ask_consumer_consent = value
@property
def locale(self) -> str:
"""
| Locale used in the GUI towards the consumer.
Type: str
"""
return self.__locale
@locale.setter
def locale(self, value: str):
self.__locale = value
@property
def tokens(self) -> str:
"""
| String containing comma-separated tokens (no spaces) associated with the customer of this hosted session. Valid tokens will be used to present the customer the option to re-use previously used payment details. This means, for instance, that the customer does not have to re-enter their card details, which is a big plus when the customer is using their mobile phone to complete the operation.
Type: str
"""
return self.__tokens
@tokens.setter
def tokens(self, value: str):
self.__tokens = value
@property
def variant(self) -> str:
"""
| Using the Back-Office it is possible to upload multiple templates of your HostedCheckout payment pages. You can force the use of another template by specifying it in the variant field. This allows you to test out the effect of certain changes to your hostedcheckout pages in a controlled manner. Please note that you need to specify the filename of the template.
Type: str
"""
return self.__variant
@variant.setter
def variant(self, value: str):
self.__variant = value
def to_dictionary(self):
dictionary = super(CreateHostedTokenizationRequest, self).to_dictionary()
if self.ask_consumer_consent is not None:
dictionary['askConsumerConsent'] = self.ask_consumer_consent
if self.locale is not None:
dictionary['locale'] = self.locale
if self.tokens is not None:
dictionary['tokens'] = self.tokens
if self.variant is not None:
dictionary['variant'] = self.variant
return dictionary
def from_dictionary(self, dictionary):
super(CreateHostedTokenizationRequest, self).from_dictionary(dictionary)
if 'askConsumerConsent' in dictionary:
self.ask_consumer_consent = dictionary['askConsumerConsent']
if 'locale' in dictionary:
self.locale = dictionary['locale']
if 'tokens' in dictionary:
self.tokens = dictionary['tokens']
if 'variant' in dictionary:
self.variant = dictionary['variant']
return self
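# A minimal usage sketch (illustrative values only), assuming
# DataObject.to_dictionary() contributes no additional keys:
#
#     request = CreateHostedTokenizationRequest()
#     request.ask_consumer_consent = True
#     request.locale = 'en_GB'
#     request.tokens = 'token-1,token-2'
#     request.variant = 'template-1.html'
#     request.to_dictionary()
#     # -> {'askConsumerConsent': True, 'locale': 'en_GB',
#     #     'tokens': 'token-1,token-2', 'variant': 'template-1.html'}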
| |
impls.rs
|
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Helper functions for implementing `RngCore` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
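//!
//! For instance, if `next_u32` first returns `0x1111_1111` and then `0x2222_2222`,
//! `next_u64_via_u32` yields `0x2222_2222_1111_1111`: the value generated first
//! occupies the least-significant half of the result.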
use crate::RngCore;
use core::cmp::min;
/// Implement `next_u64` via `next_u32`, little-endian order.
pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
// Use LE; we explicitly generate one value before the next.
let x = u64::from(rng.next_u32());
let y = u64::from(rng.next_u32());
(y << 32) | x
}
/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
///
/// The fastest way to fill a slice is usually to work as long as possible with
/// integers. That is why this method mostly uses `next_u64`, and only uses
/// `next_u32` once, when 4 or fewer bytes remain at the end of the slice.
pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
let mut left = dest;
while left.len() >= 8 {
let (l, r) = { left }.split_at_mut(8);
left = r;
let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
l.copy_from_slice(&chunk);
}
let n = left.len();
if n > 4 {
let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
left.copy_from_slice(&chunk[..n]);
} else if n > 0 {
let chunk: [u8; 4] = rng.next_u32().to_le_bytes();
left.copy_from_slice(&chunk[..n]);
}
}
macro_rules! fill_via_chunks {
($src:expr, $dst:expr, $ty:ty) => {{
const SIZE: usize = core::mem::size_of::<$ty>();
let chunk_size_u8 = min($src.len() * SIZE, $dst.len());
let chunk_size = (chunk_size_u8 + SIZE - 1) / SIZE;
// The following can be replaced with safe code, but unfortunately it's
// ca. 8% slower.
if cfg!(target_endian = "little") {
unsafe {
core::ptr::copy_nonoverlapping(
$src.as_ptr() as *const u8,
$dst.as_mut_ptr(),
chunk_size_u8);
}
} else {
for (&n, chunk) in $src.iter().zip($dst.chunks_mut(SIZE)) {
let tmp = n.to_le();
let src_ptr = &tmp as *const $ty as *const u8;
unsafe {
core::ptr::copy_nonoverlapping(
src_ptr,
chunk.as_mut_ptr(),
chunk.len());
}
}
}
(chunk_size, chunk_size_u8)
}};
}
/// Implement `fill_bytes` by reading chunks from the output buffer of a block
/// based RNG.
///
/// The return values are `(consumed_u32, filled_u8)`.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u32` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 4` rounded up.
///
/// # Example
/// (from `IsaacRng`)
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
/// let mut read_len = 0;
/// while read_len < dest.len() {
/// if self.index >= self.rsl.len() {
/// self.isaac();
/// }
///
/// let (consumed_u32, filled_u8) =
/// impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
/// &mut dest[read_len..]);
///
/// self.index += consumed_u32;
/// read_len += filled_u8;
/// }
/// }
/// ```
pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
fill_via_chunks!(src, dest, u32)
}
/// Implement `fill_bytes` by reading chunks from the output buffer of a block
/// based RNG.
///
/// The return values are `(consumed_u64, filled_u8)`.
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u64` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 8` rounded up.
///
/// See `fill_via_u32_chunks` for an example.
pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
fill_via_chunks!(src, dest, u64)
}
/// Implement `next_u32` via `fill_bytes`, little-endian order.
pub fn
|
<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
let mut buf = [0; 4];
rng.fill_bytes(&mut buf);
u32::from_le_bytes(buf)
}
/// Implement `next_u64` via `fill_bytes`, little-endian order.
pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
let mut buf = [0; 8];
rng.fill_bytes(&mut buf);
u64::from_le_bytes(buf)
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_fill_via_u32_chunks() {
let src = [1, 2, 3];
let mut dst = [0u8; 11];
assert_eq!(fill_via_u32_chunks(&src, &mut dst), (3, 11));
assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]);
let mut dst = [0u8; 13];
assert_eq!(fill_via_u32_chunks(&src, &mut dst), (3, 12));
assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]);
let mut dst = [0u8; 5];
assert_eq!(fill_via_u32_chunks(&src, &mut dst), (2, 5));
assert_eq!(dst, [1, 0, 0, 0, 2]);
}
#[test]
fn test_fill_via_u64_chunks() {
let src = [1, 2];
let mut dst = [0u8; 11];
assert_eq!(fill_via_u64_chunks(&src, &mut dst), (2, 11));
assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]);
let mut dst = [0u8; 17];
assert_eq!(fill_via_u64_chunks(&src, &mut dst), (2, 16));
assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]);
let mut dst = [0u8; 5];
assert_eq!(fill_via_u64_chunks(&src, &mut dst), (1, 5));
assert_eq!(dst, [1, 0, 0, 0, 0]);
}
}
|
next_u32_via_fill
|
term2.rs
|
//! This provides wrappers around the `StdoutTerminal` and `StderrTerminal` types
//! that do not fail if `StdoutTerminal` etc. can't be constructed, which happens
//! if TERM isn't defined.
use markdown::tokenize;
use markdown::{Block, ListItem, Span};
use rustup_utils::tty;
use std::io;
use term;
pub use term::color;
pub use term::Attr;
pub trait Instantiable {
fn instance() -> Self;
}
impl Instantiable for io::Stdout {
fn instance() -> Self {
io::stdout()
}
}
impl Instantiable for io::Stderr {
fn instance() -> Self {
io::stderr()
}
}
pub trait Isatty {
fn isatty() -> bool;
}
impl Isatty for io::Stdout {
fn isatty() -> bool {
tty::stdout_isatty()
}
}
impl Isatty for io::Stderr {
fn isatty() -> bool {
tty::stderr_isatty()
}
}
pub struct Terminal<T>(Option<Box<term::Terminal<Output = T> + Send>>)
where
T: Instantiable + Isatty + io::Write;
pub type StdoutTerminal = Terminal<io::Stdout>;
pub type StderrTerminal = Terminal<io::Stderr>;
pub fn stdout() -> StdoutTerminal {
Terminal(term::stdout())
}
pub fn stderr() -> StderrTerminal {
Terminal(term::stderr())
}
// Handles the wrapping of text written to the console
struct LineWrapper<'a, T: io::Write + 'a> {
indent: u32,
margin: u32,
pos: u32,
pub w: &'a mut T,
}
impl<'a, T: io::Write + 'a> LineWrapper<'a, T> {
// Just write a newline
fn write_line(&mut self) {
let _ = writeln!(self.w, "");
// Reset column position to start of line
self.pos = 0;
}
// Called before writing text to ensure indent is applied
fn write_indent(&mut self) {
if self.pos == 0 {
// Write a space for each level of indent
for _ in 0..self.indent {
let _ = write!(self.w, " ");
}
self.pos = self.indent;
}
}
// Write a non-breaking word
fn write_word(&mut self, word: &str) {
// Ensure correct indentation
self.write_indent();
let word_len = word.len() as u32;
// If this word goes past the margin
if self.pos + word_len > self.margin {
// And adding a newline would give us more space
if self.pos > self.indent {
// Then add a newline!
self.write_line();
self.write_indent();
}
}
// Write the word
let _ = write!(self.w, "{}", word);
self.pos += word_len;
}
fn write_space(&mut self) {
if self.pos > self.indent {
if self.pos < self.margin {
self.write_word(" ");
} else {
self.write_line();
}
}
}
// Writes a span of text which wraps at the margin
fn write_span(&mut self, text: &str) {
// Allow words to wrap on whitespace
let mut is_first = true;
for word in text.split(char::is_whitespace) {
if is_first {
is_first = false;
} else {
self.write_space();
}
self.write_word(word);
}
}
// Constructor
fn new(w: &'a mut T, indent: u32, margin: u32) -> Self {
LineWrapper {
indent: indent,
margin: margin,
pos: indent,
w: w,
}
}
}
// Handles the formatting of text
struct LineFormatter<'a, T: Instantiable + Isatty + io::Write + 'a> {
wrapper: LineWrapper<'a, Terminal<T>>,
attrs: Vec<Attr>,
}
impl<'a, T: Instantiable + Isatty + io::Write + 'a> LineFormatter<'a, T> {
fn new(w: &'a mut Terminal<T>, indent: u32, margin: u32) -> Self {
LineFormatter {
wrapper: LineWrapper::new(w, indent, margin),
attrs: Vec::new(),
}
}
fn push_attr(&mut self, attr: Attr) {
self.attrs.push(attr);
let _ = self.wrapper.w.attr(attr);
}
fn pop_attr(&mut self) {
self.attrs.pop();
let _ = self.wrapper.w.reset();
for attr in &self.attrs {
let _ = self.wrapper.w.attr(*attr);
}
}
fn do_spans(&mut self, spans: Vec<Span>) {
for span in spans {
match span {
Span::Break => {}
Span::Text(text) => {
self.wrapper.write_span(&text);
}
Span::Code(code) => {
self.push_attr(Attr::Bold);
self.wrapper.write_word(&code);
self.pop_attr();
}
Span::Emphasis(spans) => {
self.push_attr(Attr::ForegroundColor(color::BRIGHT_RED));
self.do_spans(spans);
self.pop_attr();
}
_ => {}
}
}
}
fn do_block(&mut self, b: Block) {
match b {
Block::Header(spans, _) => {
self.push_attr(Attr::Bold);
self.wrapper.write_line();
self.do_spans(spans);
self.wrapper.write_line();
self.pop_attr();
}
Block::CodeBlock(code) => {
self.wrapper.write_line();
self.wrapper.indent += 2;
for line in code.lines() {
// Don't word-wrap code lines
self.wrapper.write_word(line);
self.wrapper.write_line();
}
self.wrapper.indent -= 2;
}
Block::Paragraph(spans) => {
self.wrapper.write_line();
self.do_spans(spans);
self.wrapper.write_line();
}
Block::UnorderedList(items) => {
self.wrapper.write_line();
for item in items {
self.wrapper.indent += 2;
match item {
ListItem::Simple(spans) => {
self.do_spans(spans);
}
ListItem::Paragraph(blocks) => {
for block in blocks {
self.do_block(block);
}
|
self.wrapper.write_line();
self.wrapper.indent -= 2;
}
}
_ => {}
}
}
}
impl<T: Instantiable + Isatty + io::Write> io::Write for Terminal<T> {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
if let Some(ref mut t) = self.0 {
t.write(buf)
} else {
let mut t = T::instance();
t.write(buf)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
if let Some(ref mut t) = self.0 {
t.flush()
} else {
let mut t = T::instance();
t.flush()
}
}
}
impl<T: Instantiable + Isatty + io::Write> Terminal<T> {
pub fn fg(&mut self, color: color::Color) -> Result<(), term::Error> {
if !T::isatty() {
return Ok(());
}
if let Some(ref mut t) = self.0 {
t.fg(color)
} else {
Ok(())
}
}
pub fn attr(&mut self, attr: Attr) -> Result<(), term::Error> {
if !T::isatty() {
return Ok(());
}
if let Some(ref mut t) = self.0 {
if let Err(e) = t.attr(attr) {
// If `attr` is not supported, try to emulate it
match attr {
Attr::Bold => t.fg(color::BRIGHT_WHITE),
_ => Err(e),
}
} else {
Ok(())
}
} else {
Ok(())
}
}
pub fn reset(&mut self) -> Result<(), term::Error> {
if !T::isatty() {
return Ok(());
}
if let Some(ref mut t) = self.0 {
t.reset()
} else {
Ok(())
}
}
pub fn md<S: AsRef<str>>(&mut self, content: S) {
let mut f = LineFormatter::new(self, 0, 79);
let blocks = tokenize(content.as_ref());
for b in blocks {
f.do_block(b);
}
}
}
|
}
}
|
__init__.py
|
import builtins
from functools import lru_cache
from types import ModuleType, FunctionType, BuiltinFunctionType
from typing import Iterable
import torch
from .funcs import *
from .funcs import __all__ as _funcs_all
from .funcs.base import get_func_from_torch
from .size import *
from .size import __all__ as _size_all
from .tensor import *
from .tensor import __all__ as _tensor_all
from ..config.meta import __VERSION__
__all__ = [
*_funcs_all,
*_size_all,
*_tensor_all,
]
_basic_types = (
builtins.bool, builtins.bytearray, builtins.bytes, builtins.complex, builtins.dict,
builtins.float, builtins.frozenset, builtins.int, builtins.list, builtins.range, builtins.set,
builtins.slice, builtins.str, builtins.tuple,
)
_torch_all = set(torch.__all__)
class _Module(ModuleType):
def __init__(self, module):
ModuleType.__init__(self, module.__name__)
for name in filter(lambda x: x.startswith('__') and x.endswith('__'), dir(module)):
setattr(self, name, getattr(module, name))
self.__origin__ = module
self.__torch_version__ = torch.__version__
self.__version__ = __VERSION__
@lru_cache()
def __getattr__(self, name):
if (name in self.__all__) or \
(hasattr(self.__origin__, name) and isinstance(getattr(self.__origin__, name), ModuleType)):
return getattr(self.__origin__, name)
else:
item = getattr(torch, name)
if isinstance(item, (FunctionType, BuiltinFunctionType)) and not name.startswith('_'):
return get_func_from_torch(name)
elif (isinstance(item, torch.dtype)) or \
isinstance(item, _basic_types) and name in _torch_all:
return item
else:
raise AttributeError(f'Attribute {repr(name)} not found in {repr(__name__)}.')
def __dir__(self) -> Iterable[str]:
|
import sys
sys.modules[__name__] = _Module(sys.modules[__name__])
|
return self.__all__
|
admin-layout.module.ts
|
import { NgModule } from '@angular/core';
import { RouterModule } from '@angular/router';
import { CommonModule } from '@angular/common';
import { FormsModule } from '@angular/forms';
import { ADMIN_LAYOUT_ROUTES } from './admin-layout.routes';
import { DashboardComponent } from '../../dashboard/dashboard.component';
import { UserProfileComponent } from '../../user-profile/user-profile.component';
import { TableListComponent } from '../../table-list/table-list.component';
import { TypographyComponent } from '../../typography/typography.component';
import { IconsComponent } from '../../icons/icons.component';
import { MapsComponent } from '../../maps/maps.component';
import { NotificationsComponent } from '../../notifications/notifications.component';
import { UpgradeComponent } from '../../upgrade/upgrade.component';
import { MatButtonModule, MatInputModule, MatRippleModule, MatTooltipModule } from '@angular/material';
import { ManageImagesComponent } from '../../views/manage-images/manage-images.component';
import { ArticlesListComponent } from '../../views/articles-list/articles-list.component';
import { DataTableModule } from '../../components/data-table/data-table.module';
@NgModule({
imports: [
CommonModule,
RouterModule.forChild(ADMIN_LAYOUT_ROUTES),
FormsModule,
MatButtonModule,
MatRippleModule,
MatInputModule,
MatTooltipModule,
DataTableModule,
],
declarations: [
DashboardComponent,
UserProfileComponent,
TableListComponent,
TypographyComponent,
IconsComponent,
MapsComponent,
NotificationsComponent,
UpgradeComponent,
ManageImagesComponent,
ArticlesListComponent,
],
})
export class
|
{}
|
AdminLayoutModule
|
foo_by_name_and_type_resource.go
|
// FooByNameAndTypeResource generated on 0000-00-00
// @see https://sdkgen.app
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
)
type FooByNameAndTypeResource struct {
BaseUrl string
Token string
Name string
Type string
}
// Listfoo Returns a collection
func (r FooByNameAndTypeResource) Listfoo(query GetQuery) EntryCollection {
req, err := http.NewRequest("GET", r.BaseURL + url, nil)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
respBody, _ := ioutil.ReadAll(resp.Body)
var response EntryCollection
json.Unmarshal(respBody, &response)
return response
}
// Createfoo
func (r FooByNameAndTypeResource) Createfoo(data EntryCreate) EntryMessage {
raw, err := json.Marshal(data)
if err != nil {
panic(err)
}
var reqBody = bytes.NewReader(raw)
req, err := http.NewRequest("POST", r.BaseURL + url, reqBody)
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
respBody, _ := ioutil.ReadAll(resp.Body)
var response EntryMessage
json.Unmarshal(respBody, &response)
return response
}
// Put
func (r FooByNameAndTypeResource) Put(data EntryUpdate) EntryMessage {
raw, err := json.Marshal(data)
if err != nil {
panic(err)
}
var reqBody = bytes.NewReader(raw)
req, err := http.NewRequest("PUT", r.BaseURL + url, reqBody)
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
respBody, _ := ioutil.ReadAll(resp.Body)
var response EntryMessage
json.Unmarshal(respBody, &response)
return response
}
// Delete
func (r FooByNameAndTypeResource) Delete() EntryMessage {
req, err := http.NewRequest("DELETE", r.BaseURL + url, nil)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
respBody, _ := ioutil.ReadAll(resp.Body)
var response EntryMessage
json.Unmarshal(respBody, &response)
return response
}
// Patch
func (r FooByNameAndTypeResource) Patch(data EntryPatch) EntryMessage {
raw, err := json.Marshal(data)
if err != nil {
panic(err)
}
var reqBody = bytes.NewReader(raw)
req, err := http.NewRequest("PATCH", r.BaseURL + url, reqBody)
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
respBody, _ := ioutil.ReadAll(resp.Body)
var response EntryMessage
json.Unmarshal(respBody, &response)
return response
}
func
|
(name string, typ string, baseUrl string, token string) FooByNameAndTypeResource {
r := FooByNameAndTypeResource {
BaseUrl: baseUrl + "/foo/" + name + "/" + typ,
Token: token,
Name: name,
Type: typ,
}
return r
}
|
NewFooByNameAndTypeResource
|
theme.ts
|
import { createAction, handleActions } from 'redux-actions';
import { defaultTheme, Utils } from 'tuya-panel-kit';
import customTheme from '../../config/theme';
const { deepMerge } = Utils.ThemeUtils;
// Actions
export const toggleTheme = createAction('TOGGLE_THEME');
|
export const actions = {
toggleTheme,
updateTheme,
};
// Reducers
const theme = handleActions(
{
[toggleTheme.toString()]: (state: any) => {
return {
...state,
type: state.type === 'light' ? 'dark' : 'light',
};
},
[updateTheme.toString()]: (state, action) => deepMerge(state, action.payload),
},
deepMerge(defaultTheme, customTheme)
);
export const reducers = {
theme,
};
|
export const updateTheme = createAction('UPDATE_THEME');
|
FormFieldHelper.ts
|
import { FormMode } from '@/models/Enums';
import { Helper } from '@/utils/helper';
import { IFormField } from './IForm';
export default class FormFieldHelper {
public static createEmptyField(fieldType: string): IFormField {
const fieldTemplate = {} as IFormField;
fieldTemplate.type = fieldType;
fieldTemplate.id = Helper.generateTempId();
fieldTemplate.guid = Helper.generateUuid();
fieldTemplate.title = '';
fieldTemplate.helpText = '';
fieldTemplate.sortIndex = 0;
fieldTemplate.mode = FormMode.Toolbox;
fieldTemplate.fieldOptions = {};
|
return fieldTemplate;
}
public static createFromCopy(originalField: IFormField): IFormField {
const newField = Helper.deepCopy<IFormField>(originalField);
newField.guid = Helper.generateUuid();
// TODO: remove this in the future; we should rely only on guid
newField.id = Helper.generateTempId();
return newField;
}
}
| |
base.py
|
from django.db import transaction
from rest_framework import generics, mixins
class BaseAPIView(generics.GenericAPIView,
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin):
@transaction.atomic
def get(self, request, *args, **kwargs):
|
@transaction.atomic
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
@transaction.atomic
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
@transaction.atomic
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
@transaction.atomic
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
|
if kwargs.get('pk'):
return self.retrieve(request, *args, **kwargs)
else:
return self.list(request, *args, **kwargs)
|
transfer_from_vault.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Caitlah Technology and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from erpnext.controllers.accounts_controller import AccountsController
class TransferfromVault(Document):
def validate(self):
if not self.title:
self.title = self.doctype
def on_submit(self):
|
def make_trxn_entries_out(self):
userid = frappe.get_doc("Agents", self.transfer_from_vault)
doc = frappe.new_doc("Transactions Details")
doc.update({
"user_id": userid.agent_user,
"posting_date": self.transfer_date,
"currency": userid.agents_currency,
"description": self.doctype,
"outflow": self.transfer_amount,
"mctn": self.name
})
doc.insert()
doc.submit()
def make_trxn_entries_in(self):
userid = frappe.get_doc("Agents", self.transfer_to_agent)
doc = frappe.new_doc("Transactions Details")
doc.update({
"user_id": userid.agent_user,
"posting_date": self.transfer_date,
"currency": userid.agents_currency,
"description": self.doctype,
"inflow": self.transfer_amount,
"mctn": self.name
})
doc.insert()
doc.submit()
def make_gl_entries(self, cancel=0, adv_adj=0):
from erpnext.accounts.general_ledger import make_gl_entries
gl_map = []
gl_map.append(
frappe._dict({
"posting_date": self.transfer_date,
"transaction_date": self.transfer_date,
"account": "Cash in Till - T&T",
"credit": self.transfer_amount,
"remarks": "Transfer from Vault to Teller",
"voucher_type": self.doctype,
"voucher_no": self.name,
"against": "Cash in Vault - T&T"
}))
gl_map.append(
frappe._dict({
"posting_date": self.transfer_date,
"transaction_date": self.transfer_date,
"account": "Cash in Vault - T&T",
"debit": self.transfer_amount,
"remarks": "Transfer from Vault to Teller",
"voucher_type": self.doctype,
"voucher_no": self.name,
"against": "Cash in Till - T&T"
}))
if gl_map:
make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj)
|
self.make_trxn_entries_out()
self.make_trxn_entries_in()
self.make_gl_entries()
|
user_authorization_test.go
|
// Copyright 2016 Apcera Inc. All rights reserved.
package test
import (
"regexp"
"testing"
)
const DefaultPass = "foo"
var permErrRe = regexp.MustCompile(`\A\-ERR\s+'Permissions Violation([^\r\n]+)\r\n`)
func
|
(t *testing.T) {
srv, opts := RunServerWithConfig("./configs/authorization.conf")
defer srv.Shutdown()
// Alice can do anything, check a few for OK result.
c := createClientConn(t, opts.Host, opts.Port)
defer c.Close()
expectAuthRequired(t, c)
doAuthConnect(t, c, "", "alice", DefaultPass)
expectResult(t, c, okRe)
sendProto(t, c, "PUB foo 2\r\nok\r\n")
expectResult(t, c, okRe)
sendProto(t, c, "SUB foo 1\r\n")
expectResult(t, c, okRe)
// Check that we now reserve _SYS.> though for internal, so no clients.
sendProto(t, c, "PUB _SYS.HB 2\r\nok\r\n")
expectResult(t, c, permErrRe)
// Check that _ is ok
sendProto(t, c, "PUB _ 2\r\nok\r\n")
expectResult(t, c, okRe)
c.Close()
// Bob is a requestor only, e.g. req.foo, req.bar for publish, subscribe only to INBOXes.
c = createClientConn(t, opts.Host, opts.Port)
defer c.Close()
expectAuthRequired(t, c)
doAuthConnect(t, c, "", "bob", DefaultPass)
expectResult(t, c, okRe)
// These should error.
sendProto(t, c, "SUB foo 1\r\n")
expectResult(t, c, permErrRe)
sendProto(t, c, "PUB foo 2\r\nok\r\n")
expectResult(t, c, permErrRe)
// These should work ok.
sendProto(t, c, "SUB _INBOX.abcd 1\r\n")
expectResult(t, c, okRe)
sendProto(t, c, "PUB req.foo 2\r\nok\r\n")
expectResult(t, c, okRe)
sendProto(t, c, "PUB req.bar 2\r\nok\r\n")
expectResult(t, c, okRe)
c.Close()
// Joe is a default user
c = createClientConn(t, opts.Host, opts.Port)
defer c.Close()
expectAuthRequired(t, c)
doAuthConnect(t, c, "", "joe", DefaultPass)
expectResult(t, c, okRe)
// These should error.
sendProto(t, c, "SUB foo.bar.* 1\r\n")
expectResult(t, c, permErrRe)
sendProto(t, c, "PUB foo.bar.baz 2\r\nok\r\n")
expectResult(t, c, permErrRe)
// These should work ok.
sendProto(t, c, "SUB _INBOX.abcd 1\r\n")
expectResult(t, c, okRe)
sendProto(t, c, "SUB PUBLIC.abcd 1\r\n")
expectResult(t, c, okRe)
sendProto(t, c, "PUB SANDBOX.foo 2\r\nok\r\n")
expectResult(t, c, okRe)
sendProto(t, c, "PUB SANDBOX.bar 2\r\nok\r\n")
expectResult(t, c, okRe)
// Since only PWC, this should fail (too many tokens).
sendProto(t, c, "PUB SANDBOX.foo.bar 2\r\nok\r\n")
expectResult(t, c, permErrRe)
c.Close()
}
|
TestUserAuthorizationProto
|
issue-5239-1.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for issue #5239
fn
|
() {
let x: |int| -> int = |ref x| { x += 1; }; //~ ERROR binary assignment operation += cannot be applied to type `&int`
}
|
main
|
handler.rs
|
// This file is part of Substrate.
// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming
//! and outgoing substreams for all gossiping protocols.
//!
//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the
//! gossiping protocols that are Substrate-related and outside of the scope of libp2p.
//!
//! # Usage
//!
//! From an API perspective, for each of its protocols, the [`NotifsHandler`] is always in one of
//! the following states (see [`State`]):
//!
//! - Closed substream. This is the initial state.
//! - Closed substream, but remote desires them to be open.
//! - Open substream.
//! - Open substream, but remote desires them to be closed.
//!
//! Each protocol in the [`NotifsHandler`] can spontaneously switch between these states:
//!
//! - "Closed substream" to "Closed substream but open desired". When that happens, a
//! [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted.
//! - "Closed substream but open desired" to "Closed substream" (i.e. the remote has cancelled
//! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted.
//! - "Open substream" to "Open substream but close desired". When that happens, a
//! [`NotifsHandlerOut::CloseDesired`] is emitted.
//!
//! The user can instruct a protocol in the `NotifsHandler` to switch from "closed" to "open" or
//! vice-versa by sending either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. The
//! `NotifsHandler` must answer with [`NotifsHandlerOut::OpenResultOk`] or
//! [`NotifsHandlerOut::OpenResultErr`], or with [`NotifsHandlerOut::CloseResult`].
//!
//! When a [`NotifsHandlerOut::OpenResultOk`] is emitted, the substream is now in the open state.
//! When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is emitted,
//! the `NotifsHandler` is now (or remains) in the closed state.
//!
//! When a [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted, the user should always send back
//! either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. If this isn't done, the
//! remote will be left in a pending state.
//!
//! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted
//! [`NotifsHandlerIn::Open`] has gotten an answer.
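//!
//! For example, after a [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted for a protocol,
//! the user answers with either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]
//! for that same protocol, and in the `Open` case waits for [`NotifsHandlerOut::OpenResultOk`]
//! or [`NotifsHandlerOut::OpenResultErr`] before issuing any further [`NotifsHandlerIn::Open`].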
use crate::protocol::generic_proto::upgrade::{
NotificationsHandshakeError, NotificationsIn, NotificationsInSubstream, NotificationsOut,
NotificationsOutSubstream, UpgradeCollec,
};
use bytes::BytesMut;
use futures::{
channel::mpsc,
lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard},
prelude::*,
};
use libp2p::core::{
upgrade::{InboundUpgrade, OutboundUpgrade},
ConnectedPoint, PeerId,
};
use libp2p::swarm::{
IntoProtocolsHandler, KeepAlive, NegotiatedSubstream, ProtocolsHandler, ProtocolsHandlerEvent,
ProtocolsHandlerUpgrErr, SubstreamProtocol,
};
use log::error;
use parking_lot::{Mutex, RwLock};
use std::{
borrow::Cow,
collections::VecDeque,
mem,
pin::Pin,
str,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
use wasm_timer::Instant;
/// Number of pending notifications in asynchronous contexts.
/// See [`NotificationsSink::reserve_notification`] for context.
const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8;
/// Number of pending notifications in synchronous contexts.
const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048;
/// Maximum duration to open a substream and receive the handshake message. After that, we
/// consider that we failed to open the substream.
const OPEN_TIMEOUT: Duration = Duration::from_secs(10);
/// After successfully establishing a connection with the remote, we keep the connection open for
/// at least this amount of time in order to give the rest of the code the chance to notify us to
/// open substreams.
const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5);
/// Implements the `IntoProtocolsHandler` trait of libp2p.
///
/// Every time a connection with a remote starts, an instance of this struct is created and
/// sent to a background task dedicated to this connection. Once the connection is established,
/// it is turned into a [`NotifsHandler`].
///
/// See the documentation at the module level for more information.
#[derive(Clone)]
pub struct NotifsHandlerProto {
/// Name of protocols, prototypes for upgrades for inbound substreams, and the message we
/// send or respond with in the handshake.
protocols: Vec<(
Cow<'static, str>,
NotificationsIn,
Arc<RwLock<Vec<u8>>>,
u64,
)>,
}
/// The actual handler once the connection has been established.
///
/// See the documentation at the module level for more information.
pub struct NotifsHandler {
/// List of notification protocols, specified by the user at initialization.
protocols: Vec<Protocol>,
/// When the connection with the remote has been successfully established.
when_connection_open: Instant,
/// Whether we are the connection dialer or listener.
endpoint: ConnectedPoint,
/// Remote we are connected to.
peer_id: PeerId,
/// Events to return in priority from `poll`.
events_queue: VecDeque<
ProtocolsHandlerEvent<NotificationsOut, usize, NotifsHandlerOut, NotifsHandlerError>,
>,
}
/// Fields specific for each individual protocol.
struct Protocol {
/// Name of the protocol.
name: Cow<'static, str>,
/// Prototype for the inbound upgrade.
in_upgrade: NotificationsIn,
/// Handshake to send when opening a substream or receiving an open request.
handshake: Arc<RwLock<Vec<u8>>>,
/// Maximum allowed size of individual notifications.
max_notification_size: u64,
/// Current state of the substreams for this protocol.
state: State,
}
/// See the module-level documentation to learn about the meaning of these variants.
#[allow(clippy::large_enum_variant)]
enum State {
/// Protocol is in the "Closed" state.
Closed {
/// True if an outgoing substream is still in the process of being opened.
pending_opening: bool,
},
/// Protocol is in the "Closed" state. A [`NotifsHandlerOut::OpenDesiredByRemote`] has been
/// emitted.
OpenDesiredByRemote {
/// Substream opened by the remote and that hasn't been accepted/rejected yet.
in_substream: NotificationsInSubstream<NegotiatedSubstream>,
/// See [`State::Closed::pending_opening`].
pending_opening: bool,
},
/// Protocol is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is
/// consequently trying to open the various notifications substreams.
///
/// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must
/// be emitted when transitioning to respectively [`State::Open`] or [`State::Closed`].
Opening {
/// Substream opened by the remote. If `Some`, has been accepted.
in_substream: Option<NotificationsInSubstream<NegotiatedSubstream>>,
},
/// Protocol is in the "Open" state.
Open {
/// Contains the two `Receiver`s connected to the [`NotificationsSink`] that has been
/// sent out. The notifications to send out can be pulled from these receivers.
/// We use two different channels in order to have two different channel sizes, but from
/// the receiving point of view, the two channels are the same.
/// The receivers are fused in case the user drops the [`NotificationsSink`] entirely.
notifications_sink_rx: stream::Select<
stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>,
stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>,
>,
/// Outbound substream that has been accepted by the remote.
///
/// Always `Some` on transition to [`State::Open`]. Switched to `None` only if the remote
/// closed the substream. If `None`, a [`NotifsHandlerOut::CloseDesired`] event has been
/// emitted.
out_substream: Option<NotificationsOutSubstream<NegotiatedSubstream>>,
/// Substream opened by the remote.
///
/// Contrary to the `out_substream` field, operations continue as normal even if the
/// substream has been closed by the remote. A `None` is treated the same way as if there
/// was an idle substream.
in_substream: Option<NotificationsInSubstream<NegotiatedSubstream>>,
},
}
impl IntoProtocolsHandler for NotifsHandlerProto {
type Handler = NotifsHandler;
fn inbound_protocol(&self) -> UpgradeCollec<NotificationsIn> {
self.protocols
.iter()
.map(|(_, p, _, _)| p.clone())
.collect::<UpgradeCollec<_>>()
}
fn into_handler(self, peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler {
NotifsHandler {
protocols: self
.protocols
.into_iter()
.map(|(name, in_upgrade, handshake, max_size)| Protocol {
name,
in_upgrade,
handshake,
state: State::Closed {
pending_opening: false,
},
max_notification_size: max_size,
})
.collect(),
peer_id: *peer_id,
endpoint: connected_point.clone(),
when_connection_open: Instant::now(),
events_queue: VecDeque::with_capacity(16),
}
}
}
/// Event that can be received by a `NotifsHandler`.
#[derive(Debug, Clone)]
pub enum NotifsHandlerIn {
/// Instruct the handler to open the notification substreams.
///
/// Must always be answered by a [`NotifsHandlerOut::OpenResultOk`] or a
/// [`NotifsHandlerOut::OpenResultErr`] event.
///
/// Importantly, it is forbidden to send a [`NotifsHandlerIn::Open`] while a previous one is
/// already in flight. It is, however, possible to send one while a `Close` is still in flight.
Open {
/// Index of the protocol in the list of protocols passed at initialization.
protocol_index: usize,
},
/// Instruct the handler to close the notification substreams, or reject any pending incoming
/// substream request.
///
/// Must always be answered by a [`NotifsHandlerOut::CloseResult`] event.
Close {
/// Index of the protocol in the list of protocols passed at initialization.
protocol_index: usize,
},
}
/// Event that can be emitted by a `NotifsHandler`.
#[derive(Debug)]
pub enum NotifsHandlerOut {
/// Acknowledges a [`NotifsHandlerIn::Open`].
OpenResultOk {
/// Index of the protocol in the list of protocols passed at initialization.
protocol_index: usize,
/// The endpoint of the connection that is open for custom protocols.
endpoint: ConnectedPoint,
/// Handshake that was sent to us.
/// This is normally a "Status" message, but this is outside the concern of this code.
received_handshake: Vec<u8>,
/// How notifications can be sent to this node.
notifications_sink: NotificationsSink,
},
/// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open
/// notification substreams.
OpenResultErr {
/// Index of the protocol in the list of protocols passed at initialization.
protocol_index: usize,
},
/// Acknowledges a [`NotifsHandlerIn::Close`].
CloseResult {
/// Index of the protocol in the list of protocols passed at initialization.
protocol_index: usize,
},
/// The remote would like the substreams to be open. Send a [`NotifsHandlerIn::Open`] or a
/// [`NotifsHandlerIn::Close`] in order to either accept or deny this request. If a
/// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not
/// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to send
/// another [`NotifsHandlerIn`].
OpenDesiredByRemote {
/// Index of the protocol in the list of protocols passed at initialization.
protocol_index: usize,
},
/// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in
/// order to close them. If a [`NotifsHandlerIn::Close`] has been sent before and has not yet
/// been acknowledged by a [`NotifsHandlerOut::CloseResult`], then you don't need to send
/// another one.
CloseDesired {
/// Index of the protocol in the list of protocols passed at initialization.
protocol_index: usize,
},
/// Received a message on a custom protocol substream.
///
/// Can only happen when the handler is in the open state.
Notification {
/// Index of the protocol in the list of protocols passed at initialization.
protocol_index: usize,
/// Message that has been received.
message: BytesMut,
},
}
/// Sink connected directly to the node background task. Allows sending notifications to the peer.
///
/// Can be cloned in order to obtain multiple references to the substream of the same peer.
#[derive(Debug, Clone)]
pub struct NotificationsSink {
inner: Arc<NotificationsSinkInner>,
}
#[derive(Debug)]
struct NotificationsSinkInner {
/// Target of the sink.
peer_id: PeerId,
/// Sender to use in asynchronous contexts. Uses an asynchronous mutex.
async_channel: FuturesMutex<mpsc::Sender<NotificationsSinkMessage>>,
/// Sender to use in synchronous contexts. Uses a synchronous mutex.
/// This channel has a large capacity and is meant to be used in contexts where
/// back-pressure cannot be properly exerted.
/// It will be removed in a future version.
sync_channel: Mutex<mpsc::Sender<NotificationsSinkMessage>>,
}
/// Message emitted through the [`NotificationsSink`] and processed by the background task
/// dedicated to the peer.
#[derive(Debug)]
enum NotificationsSinkMessage {
/// Message emitted by [`NotificationsSink::reserve_notification`] and
/// [`NotificationsSink::write_notification_now`].
Notification { message: Vec<u8> },
/// Must close the connection.
ForceClose,
}
impl NotificationsSink {
/// Returns the [`PeerId`] the sink is connected to.
pub fn peer_id(&self) -> &PeerId {
&self.inner.peer_id
}
/// Sends a notification to the peer.
///
/// If too many messages are already buffered, the notification is silently discarded and the
/// connection to the peer will be closed shortly after.
///
/// The protocol name is expected to be checked ahead of calling this method. It is a logic
/// error to send a notification using an unknown protocol.
///
/// This method will be removed in a future version.
pub fn send_sync_notification(&self, message: impl Into<Vec<u8>>) {
let mut lock = self.inner.sync_channel.lock();
let result = lock.try_send(NotificationsSinkMessage::Notification {
message: message.into(),
});
if result.is_err() {
// Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the
// buffer, and therefore `try_send` will succeed.
let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose);
debug_assert!(_result2
.map(|()| true)
.unwrap_or_else(|err| err.is_disconnected()));
}
}
/// Wait until the remote is ready to accept a notification.
///
/// Returns an error in the case where the connection is closed.
///
/// The protocol name is expected to be checked ahead of calling this method. It is a logic
/// error to send a notification using an unknown protocol.
pub async fn
|
(&self) -> Result<Ready<'_>, ()> {
let mut lock = self.inner.async_channel.lock().await;
let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await;
if poll_ready.is_ok() {
Ok(Ready { lock })
} else {
Err(())
}
}
}
/// Notification slot is reserved and the notification can actually be sent.
#[must_use]
#[derive(Debug)]
pub struct Ready<'a> {
/// Guarded channel. The channel inside is guaranteed to not be full.
lock: FuturesMutexGuard<'a, mpsc::Sender<NotificationsSinkMessage>>,
}
impl<'a> Ready<'a> {
/// Consumes this slot's reservation and actually queues the notification.
///
/// Returns an error if the substream has been closed.
pub fn send(mut self, notification: impl Into<Vec<u8>>) -> Result<(), ()> {
self.lock
.start_send(NotificationsSinkMessage::Notification {
message: notification.into(),
})
.map_err(|_| ())
}
}
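// Illustrative only (not part of the original code): how a caller could use the sink from
// an asynchronous context. `sink` and `payload` are hypothetical bindings.
//
//     if let Ok(ready) = sink.reserve_notification().await {
//         // The slot is reserved; `send` only fails if the substream has been closed.
//         let _ = ready.send(payload);
//     }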
/// Error specific to the collection of protocols.
#[derive(Debug, derive_more::Display, derive_more::Error)]
pub enum NotifsHandlerError {
/// Channel of synchronous notifications is full.
SyncNotificationsClogged,
}
impl NotifsHandlerProto {
/// Builds a new handler.
///
/// `list` is a list of notification protocol names, the message to send as part of the
/// handshake, and the maximum allowed size of a notification. At the moment, the message
/// is always the same whether we open a substream ourselves or respond to handshake from
/// the remote.
pub fn new(list: impl Into<Vec<(Cow<'static, str>, Arc<RwLock<Vec<u8>>>, u64)>>) -> Self {
let protocols = list
.into()
.into_iter()
.map(|(proto_name, msg, max_notif_size)| {
(
proto_name.clone(),
NotificationsIn::new(proto_name, max_notif_size),
msg,
max_notif_size,
)
})
.collect();
NotifsHandlerProto { protocols }
}
}
impl ProtocolsHandler for NotifsHandler {
type InEvent = NotifsHandlerIn;
type OutEvent = NotifsHandlerOut;
type Error = NotifsHandlerError;
type InboundProtocol = UpgradeCollec<NotificationsIn>;
type OutboundProtocol = NotificationsOut;
// Index within the `out_protocols`.
type OutboundOpenInfo = usize;
type InboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
let protocols = self
.protocols
.iter()
.map(|p| p.in_upgrade.clone())
.collect::<UpgradeCollec<_>>();
SubstreamProtocol::new(protocols, ())
}
fn inject_fully_negotiated_inbound(
&mut self,
((_remote_handshake, mut new_substream), protocol_index):
<Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
(): (),
) {
let mut protocol_info = &mut self.protocols[protocol_index];
match protocol_info.state {
State::Closed { pending_opening } => {
self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
NotifsHandlerOut::OpenDesiredByRemote { protocol_index },
));
protocol_info.state = State::OpenDesiredByRemote {
in_substream: new_substream,
pending_opening,
};
}
State::OpenDesiredByRemote { .. } => {
// If a substream already exists, silently drop the new one.
// Note that we drop the substream, which will send an equivalent to a
// TCP "RST" to the remote and force-close the substream. It might
// seem like an unclean way to get rid of a substream. However, keep
// in mind that it is invalid for the remote to open multiple such
// substreams, and therefore sending a "RST" is the most correct thing
// to do.
}
State::Opening {
ref mut in_substream,
..
}
| State::Open {
ref mut in_substream,
..
} => {
if in_substream.is_some() {
// Same remark as above.
return;
}
// Create `handshake_message` on a separate line to be sure that the
// lock is released as soon as possible.
let handshake_message = protocol_info.handshake.read().clone();
new_substream.send_handshake(handshake_message);
*in_substream = Some(new_substream);
}
}
}
fn inject_fully_negotiated_outbound(
&mut self,
(handshake, substream): <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
protocol_index: Self::OutboundOpenInfo,
) {
match self.protocols[protocol_index].state {
State::Closed {
ref mut pending_opening,
}
| State::OpenDesiredByRemote {
ref mut pending_opening,
..
} => {
debug_assert!(*pending_opening);
*pending_opening = false;
}
State::Open { .. } => {
error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler");
debug_assert!(false);
}
State::Opening {
ref mut in_substream,
} => {
let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE);
let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE);
let notifications_sink = NotificationsSink {
inner: Arc::new(NotificationsSinkInner {
peer_id: self.peer_id,
async_channel: FuturesMutex::new(async_tx),
sync_channel: Mutex::new(sync_tx),
}),
};
self.protocols[protocol_index].state = State::Open {
notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()),
out_substream: Some(substream),
in_substream: in_substream.take(),
};
self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
NotifsHandlerOut::OpenResultOk {
protocol_index,
endpoint: self.endpoint.clone(),
received_handshake: handshake,
notifications_sink,
},
));
}
}
}
fn inject_event(&mut self, message: NotifsHandlerIn) {
match message {
NotifsHandlerIn::Open { protocol_index } => {
let protocol_info = &mut self.protocols[protocol_index];
match &mut protocol_info.state {
State::Closed { pending_opening } => {
if !*pending_opening {
let proto = NotificationsOut::new(
protocol_info.name.clone(),
protocol_info.handshake.read().clone(),
protocol_info.max_notification_size,
);
self.events_queue.push_back(
ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: SubstreamProtocol::new(proto, protocol_index)
.with_timeout(OPEN_TIMEOUT),
},
);
}
protocol_info.state = State::Opening { in_substream: None };
}
State::OpenDesiredByRemote {
pending_opening,
in_substream,
} => {
let handshake_message = protocol_info.handshake.read().clone();
if !*pending_opening {
let proto = NotificationsOut::new(
protocol_info.name.clone(),
handshake_message.clone(),
protocol_info.max_notification_size,
);
self.events_queue.push_back(
ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: SubstreamProtocol::new(proto, protocol_index)
.with_timeout(OPEN_TIMEOUT),
},
);
}
in_substream.send_handshake(handshake_message);
// The state change is done in two steps because of borrowing issues.
let in_substream = match mem::replace(
&mut protocol_info.state,
State::Opening { in_substream: None },
) {
State::OpenDesiredByRemote { in_substream, .. } => in_substream,
_ => unreachable!(),
};
protocol_info.state = State::Opening {
in_substream: Some(in_substream),
};
}
State::Opening { .. } | State::Open { .. } => {
// As documented, it is forbidden to send an `Open` while there is already
// one in flight.
error!(target: "sub-libp2p", "opening already-opened handler");
debug_assert!(false);
}
}
}
NotifsHandlerIn::Close { protocol_index } => {
match self.protocols[protocol_index].state {
State::Open { .. } => {
self.protocols[protocol_index].state = State::Closed {
pending_opening: false,
};
}
State::Opening { .. } => {
self.protocols[protocol_index].state = State::Closed {
pending_opening: true,
};
self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
NotifsHandlerOut::OpenResultErr { protocol_index },
));
}
State::OpenDesiredByRemote {
pending_opening, ..
} => {
self.protocols[protocol_index].state = State::Closed { pending_opening };
}
State::Closed { .. } => {}
}
self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
NotifsHandlerOut::CloseResult { protocol_index },
));
}
}
}
fn inject_dial_upgrade_error(
&mut self,
num: usize,
_: ProtocolsHandlerUpgrErr<NotificationsHandshakeError>,
) {
match self.protocols[num].state {
State::Closed {
ref mut pending_opening,
}
| State::OpenDesiredByRemote {
ref mut pending_opening,
..
} => {
debug_assert!(*pending_opening);
*pending_opening = false;
}
State::Opening { .. } => {
self.protocols[num].state = State::Closed {
pending_opening: false,
};
self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
NotifsHandlerOut::OpenResultErr {
protocol_index: num,
},
));
}
// No substream is being opened when already in the `Open` state.
State::Open { .. } => debug_assert!(false),
}
}
fn connection_keep_alive(&self) -> KeepAlive {
// `Yes` if any protocol has some activity.
if self
.protocols
.iter()
.any(|p| !matches!(p.state, State::Closed { .. }))
{
return KeepAlive::Yes;
}
// A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote
// to express desire to open substreams.
KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME)
}
fn poll(
&mut self,
cx: &mut Context,
) -> Poll<
ProtocolsHandlerEvent<
Self::OutboundProtocol,
Self::OutboundOpenInfo,
Self::OutEvent,
Self::Error,
>,
> {
if let Some(ev) = self.events_queue.pop_front() {
return Poll::Ready(ev);
}
for protocol_index in 0..self.protocols.len() {
// Poll inbound substreams.
// Inbound substreams being closed is always tolerated, except for the
// `OpenDesiredByRemote` state which might need to be switched back to `Closed`.
match &mut self.protocols[protocol_index].state {
State::Closed { .. }
| State::Open {
in_substream: None, ..
}
| State::Opening { in_substream: None } => {}
State::Open {
in_substream: in_substream @ Some(_),
..
} => match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) {
Poll::Pending => {}
Poll::Ready(Some(Ok(message))) => {
let event = NotifsHandlerOut::Notification {
protocol_index,
message,
};
return Poll::Ready(ProtocolsHandlerEvent::Custom(event));
}
Poll::Ready(None) | Poll::Ready(Some(Err(_))) => *in_substream = None,
},
State::OpenDesiredByRemote {
in_substream,
pending_opening,
} => match NotificationsInSubstream::poll_process(Pin::new(in_substream), cx) {
Poll::Pending => {}
Poll::Ready(Ok(void)) => match void {},
Poll::Ready(Err(_)) => {
self.protocols[protocol_index].state = State::Closed {
pending_opening: *pending_opening,
};
return Poll::Ready(ProtocolsHandlerEvent::Custom(
NotifsHandlerOut::CloseDesired { protocol_index },
));
}
},
State::Opening {
in_substream: in_substream @ Some(_),
..
} => {
match NotificationsInSubstream::poll_process(
Pin::new(in_substream.as_mut().unwrap()),
cx,
) {
Poll::Pending => {}
Poll::Ready(Ok(void)) => match void {},
Poll::Ready(Err(_)) => *in_substream = None,
}
}
}
// Poll outbound substream.
match &mut self.protocols[protocol_index].state {
State::Open {
out_substream: out_substream @ Some(_),
..
} => {
match Sink::poll_flush(Pin::new(out_substream.as_mut().unwrap()), cx) {
Poll::Pending | Poll::Ready(Ok(())) => {}
Poll::Ready(Err(_)) => {
*out_substream = None;
let event = NotifsHandlerOut::CloseDesired { protocol_index };
return Poll::Ready(ProtocolsHandlerEvent::Custom(event));
}
};
}
State::Closed { .. }
| State::Opening { .. }
| State::Open {
out_substream: None,
..
}
| State::OpenDesiredByRemote { .. } => {}
}
if let State::Open {
notifications_sink_rx,
out_substream: Some(out_substream),
..
} = &mut self.protocols[protocol_index].state
{
loop {
// Before we poll the notifications sink receiver, check that the substream
// is ready to accept a message.
match out_substream.poll_ready_unpin(cx) {
Poll::Ready(_) => {}
Poll::Pending => break,
}
// Now that all substreams are ready for a message, grab what to send.
let message = match notifications_sink_rx.poll_next_unpin(cx) {
Poll::Ready(Some(msg)) => msg,
Poll::Ready(None) | Poll::Pending => break,
};
match message {
NotificationsSinkMessage::Notification { message } => {
let _ = out_substream.start_send_unpin(message);
// Calling `start_send_unpin` only queues the message. Actually
// emitting the message is done with `poll_flush`. In order to
// not introduce too much complexity, this flushing is done earlier
// in the body of this `poll()` method. As such, we schedule a task
// wake-up now in order to guarantee that `poll()` will be called
// again and the flush will happen.
// At the time of the writing of this comment, a rewrite of this
// code is being planned. If you find this comment in the wild and
// the rewrite didn't happen, please consider a refactor.
cx.waker().wake_by_ref();
}
NotificationsSinkMessage::ForceClose => {
return Poll::Ready(ProtocolsHandlerEvent::Close(
NotifsHandlerError::SyncNotificationsClogged,
));
}
}
}
}
}
Poll::Pending
}
}
|
reserve_notification
|
Engineer.js
|
//import the parent class
const Employee = require('./Employee');
class
|
extends Employee {
constructor(name, id, email, github) {
super(name, id, email);
this.github = github;
}
getRole(){
return "Engineer";
}
getGithub() {
return this.github;
}
}
module.exports = Engineer;
|
Engineer
|
_nbdev.py
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"sparsify": "neighbors.ipynb",
"hstack": "neighbors.ipynb",
"vstack": "neighbors.ipynb",
"stack": "neighbors.ipynb",
"NMSLibSklearnWrapper": "neighbors.ipynb",
"FastCosineNN": "neighbors.ipynb",
"FastJaccardNN": "neighbors.ipynb",
"FastL2NN": "neighbors.ipynb",
"FastKLDivNN": "neighbors.ipynb"}
modules = ["neighbors.py"]
doc_url = "https://AlanGanem.github.io/NMSLearn/nmslearn/"
git_url = "https://github.com/AlanGanem/nmslearn/tree/master/"
def custom_doc_links(name):
|
return None
|
|
sq_nubia_plugin.py
|
import argparse
from suzieq.cli.sqcmds import *
from suzieq.cli.sqcmds import context_commands
from suzieq.cli.sqcmds import sqcmds_all
from suzieq.cli.sq_nubia_context import NubiaSuzieqContext
from suzieq.cli.sq_nubia_statusbar import NubiaSuzieqStatusBar
from nubia import PluginInterface, CompletionDataSource
from nubia.internal.blackcmd import CommandBlacklist
from nubia.internal.cmdbase import AutoCommand
class NubiaSuzieqPlugin(PluginInterface):
"""
The PluginInterface class is a way to customize nubia for every customer
use case. It allows custom argument validation, control over command
loading, custom context objects, and much more.
"""
def create_context(self):
"""
Must create an object that inherits from `Context` parent class.
The plugin can return a custom context but it has to inherit from the
correct parent class.
"""
return NubiaSuzieqContext()
def validate_args(self, args):
"""
This will be executed when starting nubia. The `args` parameter is a
dict-like object that contains the argparse result after parsing the
command line arguments. The plugin can choose to update the context
with the values, and/or decide to raise `ArgsValidationError` with
the error message.
"""
pass
def get_commands(self):
cmds = [AutoCommand(getattr(globals()[x], x))
for x in sqcmds_all if not x.startswith('_')]
cmds.append(AutoCommand(context_commands.set_ctxt))
cmds.append(AutoCommand(context_commands.clear_ctxt))
return cmds
def get_opts_parser(self, add_help=True):
"""
Builds the ArgumentParser that will be passed to the shell; use this to
build your list of arguments that you want for your shell.
"""
opts_parser = argparse.ArgumentParser(
description="Suzieq CLI",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=add_help,
)
opts_parser.add_argument(
"--config", "-c", default="", type=str, help="Configuration File"
)
opts_parser.add_argument(
"--verbose",
"-v",
action="count",
default=0,
|
help="Increase verbosity, can be specified " "multiple times",
)
opts_parser.add_argument(
"--stderr",
"-s",
action="store_true",
default=True,
help="By default the logging output goes to stderr "
"Enable this feature to send it to a temporary logfile"
)
# we only support pandas now, so we don't want this option
# opts_parser.add_argument(
# "--use-engine", "-e", help="Which analysis engine to use", default="pandas"
# )
return opts_parser
def get_completion_datasource_for_global_argument(self, argument):
if argument == "--config":
return ConfigFileCompletionDataSource()
if argument == "--use-engine":
return ConfigEngineCompletionDataSource()
return None
def create_usage_logger(self, context):
"""
Override this and return you own usage logger.
Must be a subtype of UsageLoggerInterface.
"""
return None
def get_status_bar(self, context):
"""
This returns the StatusBar object that handles the bottom status bar
and the right-side per-line status
"""
return NubiaSuzieqStatusBar(context)
def getBlacklistPlugin(self):
blacklister = CommandBlacklist()
blacklister.add_blocked_command("topcpu")
blacklister.add_blocked_command("topmem")
return blacklister
class ConfigFileCompletionDataSource(CompletionDataSource):
def get_all(self):
return ["/tmp/c1", "/tmp/c2"]
class ConfigEngineCompletionDataSource(CompletionDataSource):
def get_all(self):
return ["pandas"]
| |
pildriver.py
|
#!/home/adam/django/Accountant/accenv/bin/python3
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
|
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
if hasattr(self.stack[0], 'format'): # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of-stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
self.push(self.draft(mode, (xsize, ysize)))
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
extrema = self.do_pop().extrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
# Use ImageChops.offset, since Image.offset is not available in current Pillow.
from PIL import ImageChops
self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
|
__init__.py
|
"""Support for HomematicIP Cloud devices."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .config_flow import configured_haps
from .const import (
CONF_ACCESSPOINT, CONF_AUTHTOKEN, DOMAIN, HMIPC_AUTHTOKEN, HMIPC_HAPID,
HMIPC_NAME)
from .device import HomematicipGenericDevice # noqa: F401
from .hap import HomematicipAuth, HomematicipHAP # noqa: F401
_LOGGER = logging.getLogger(__name__)
ATTR_DURATION = 'duration'
ATTR_ENDTIME = 'endtime'
ATTR_TEMPERATURE = 'temperature'
ATTR_ACCESSPOINT_ID = 'accesspoint_id'
SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION = 'activate_eco_mode_with_duration'
SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD = 'activate_eco_mode_with_period'
SERVICE_ACTIVATE_VACATION = 'activate_vacation'
SERVICE_DEACTIVATE_ECO_MODE = 'deactivate_eco_mode'
SERVICE_DEACTIVATE_VACATION = 'deactivate_vacation'
CONFIG_SCHEMA = vol.Schema({
vol.Optional(DOMAIN, default=[]): vol.All(cv.ensure_list, [vol.Schema({
vol.Optional(CONF_NAME, default=''): vol.Any(cv.string),
vol.Required(CONF_ACCESSPOINT): cv.string,
vol.Required(CONF_AUTHTOKEN): cv.string,
})]),
}, extra=vol.ALLOW_EXTRA)
SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION = vol.Schema({
vol.Required(ATTR_DURATION): cv.positive_int,
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD = vol.Schema({
vol.Required(ATTR_ENDTIME): cv.datetime,
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_ACTIVATE_VACATION = vol.Schema({
vol.Required(ATTR_ENDTIME): cv.datetime,
vol.Required(ATTR_TEMPERATURE, default=18.0):
vol.All(vol.Coerce(float), vol.Range(min=0, max=55)),
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_DEACTIVATE_ECO_MODE = vol.Schema({
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_DEACTIVATE_VACATION = vol.Schema({
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
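# Illustrative only (not part of the original module): service data accepted by
# SCHEMA_ACTIVATE_VACATION. The access point id is a made-up 24-character value.
#
#     {
#         "endtime": "2019-09-17 14:00:00",
#         "temperature": 18.5,
#         "accesspoint_id": "3014F7110000000000000000",
#     }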
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the HomematicIP Cloud component."""
hass.data[DOMAIN] = {}
accesspoints = config.get(DOMAIN, [])
for conf in accesspoints:
if conf[CONF_ACCESSPOINT] not in configured_haps(hass):
hass.async_add_job(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data={
HMIPC_HAPID: conf[CONF_ACCESSPOINT],
HMIPC_AUTHTOKEN: conf[CONF_AUTHTOKEN],
HMIPC_NAME: conf[CONF_NAME],
}
))
async def _async_activate_eco_mode_with_duration(service):
"""Service to activate eco mode with duration."""
duration = service.data[ATTR_DURATION]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_absence_with_duration(duration)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_absence_with_duration(duration)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION,
_async_activate_eco_mode_with_duration,
schema=SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION)
async def _async_activate_eco_mode_with_period(service):
"""Service to activate eco mode with period."""
endtime = service.data[ATTR_ENDTIME]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_absence_with_period(endtime)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_absence_with_period(endtime)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD,
_async_activate_eco_mode_with_period,
schema=SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD)
async def _async_activate_vacation(service):
"""Service to activate vacation."""
endtime = service.data[ATTR_ENDTIME]
temperature = service.data[ATTR_TEMPERATURE]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_vacation(endtime, temperature)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_vacation(endtime, temperature)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_VACATION, _async_activate_vacation,
schema=SCHEMA_ACTIVATE_VACATION)
async def _async_deactivate_eco_mode(service):
"""Service to deactivate eco mode."""
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
|
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.deactivate_absence()
hass.services.async_register(
DOMAIN, SERVICE_DEACTIVATE_ECO_MODE, _async_deactivate_eco_mode,
schema=SCHEMA_DEACTIVATE_ECO_MODE)
async def _async_deactivate_vacation(service):
"""Service to deactivate vacation."""
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.deactivate_vacation()
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.deactivate_vacation()
hass.services.async_register(
DOMAIN, SERVICE_DEACTIVATE_VACATION, _async_deactivate_vacation,
schema=SCHEMA_DEACTIVATE_VACATION)
def _get_home(hapid: str):
"""Return a HmIP home."""
hap = hass.data[DOMAIN][hapid]
if hap:
return hap.home
return None
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up an access point from a config entry."""
hap = HomematicipHAP(hass, entry)
hapid = entry.data[HMIPC_HAPID].replace('-', '').upper()
hass.data[DOMAIN][hapid] = hap
if not await hap.async_setup():
return False
# Register hap as device in registry.
device_registry = await dr.async_get_registry(hass)
home = hap.home
# Add the HAP name from configuration if set.
hapname = home.label \
if not home.name else "{} {}".format(home.label, home.name)
device_registry.async_get_or_create(
config_entry_id=home.id,
identifiers={(DOMAIN, home.id)},
manufacturer='eQ-3',
name=hapname,
model=home.modelType,
sw_version=home.currentAPVersion,
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hap = hass.data[DOMAIN].pop(entry.data[HMIPC_HAPID])
return await hap.async_reset()
|
home = _get_home(hapid)
if home:
await home.deactivate_absence()
|
merge_cmd.go
|
// SPDX-License-Identifier: Apache-2.0
// Copyright © 2021 Wrangle Ltd
package wrgl
import (
"bytes"
"context"
"encoding/csv"
"encoding/hex"
"fmt"
"os"
"path"
"runtime"
"sort"
"strings"
"time"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
"github.com/schollz/progressbar/v3"
"github.com/spf13/cobra"
"github.com/wrgl/wrgl/cmd/wrgl/utils"
"github.com/wrgl/wrgl/pkg/conf"
conffs "github.com/wrgl/wrgl/pkg/conf/fs"
"github.com/wrgl/wrgl/pkg/diff"
"github.com/wrgl/wrgl/pkg/ingest"
"github.com/wrgl/wrgl/pkg/merge"
"github.com/wrgl/wrgl/pkg/objects"
"github.com/wrgl/wrgl/pkg/ref"
"github.com/wrgl/wrgl/pkg/slice"
"github.com/wrgl/wrgl/pkg/sorter"
"github.com/wrgl/wrgl/pkg/widgets"
)
func getTable(db objects.Store, comSum []byte) (sum []byte, tbl *objects.Table, err error) {
com, err := objects.GetCommit(db, comSum)
if err != nil {
return
}
tbl, err = objects.GetTable(db, com.Table)
if err != nil {
return
}
return com.Table, tbl, nil
}
func mergeCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "merge BRANCH COMMIT...",
Short: "Merge two or more commits together.",
Long: "Merge two or more commits together using merge UI. If merge is successful then create a merge commit under BRANCH.",
Example: utils.CombineExamples([]utils.Example{
{
Comment: "merge two branches",
Line: "wrgl merge branch-1 branch-2",
},
{
Comment: "merge a commit into a branch",
Line: "wrgl merge my-branch 43a5f3447e82b53a2574ef5af470df96",
},
{
Comment: "perform merge but don't create a merge commit, output result to file MERGE_SUM1_SUM2.csv instead",
Line: "wrgl merge branch-1 branch-2 --no-commit",
},
{
Comment: "don't show merge UI, output conflicts and resolved rows to CONFLICTS_SUM1_SUM2.csv instead",
Line: "wrgl merge branch-1 branch-2 --no-gui",
},
{
Comment: "create a merge commit from an already resolved CSV file",
Line: "wrgl merge branch-1 branch-2 --commit-csv resolved.csv",
},
}),
Args: cobra.MinimumNArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
rd := utils.GetRepoDir(cmd)
defer rd.Close()
s := conffs.NewStore(rd.FullPath, conffs.AggregateSource, "")
c, err := s.Open()
if err != nil {
return err
}
if err := ensureUserSet(cmd, c); err != nil {
return err
}
cleanup, err := setupDebugLog(cmd)
if err != nil {
return err
}
defer cleanup()
db, err := rd.OpenObjectsStore()
if err != nil {
return err
}
defer db.Close()
rs := rd.OpenRefStore()
noCommit, err := cmd.Flags().GetBool("no-commit")
if err != nil {
return err
}
noGUI, err := cmd.Flags().GetBool("no-gui")
if err != nil {
return err
}
commitCSV, err := cmd.Flags().GetString("commit-csv")
if err != nil {
return err
}
numWorkers, err := cmd.Flags().GetInt("num-workers")
if err != nil {
return err
}
message, err := cmd.Flags().GetString("message")
if err != nil {
return err
}
pk, err := cmd.Flags().GetStringSlice("primary-key")
if err != nil {
return err
}
ff, err := getFastForward(cmd, c)
if err != nil {
return err
}
return runMerge(cmd, c, db, rs, args, noCommit, noGUI, ff, commitCSV, numWorkers, message, pk)
},
}
cmd.Flags().Bool("no-commit", false, "perform the merge but don't create a merge commit, instead output merge result to file MERGE_SUM1_SUM2_..._SUMn.csv")
cmd.Flags().Bool("no-gui", false, "don't show mergetool, instead output conflicts (and resolved rows) to file CONFLICTS_SUM1_SUM2_..._SUMn.csv")
cmd.Flags().String("commit-csv", "", "don't perform merge, just create a merge commit with the specified CSV file")
cmd.Flags().StringP("message", "m", "", "merge commit message")
cmd.Flags().StringSliceP("primary-key", "p", []string{}, "merge commit primary key. This is only used when --commit-csv is in use. If this isn't specified then primary key is the same as BRANCH HEAD's")
cmd.Flags().IntP("num-workers", "n", runtime.GOMAXPROCS(0), "number of CPU threads to utilize (default to GOMAXPROCS)")
cmd.Flags().Bool("ff", false, "when merging a descendant commit into a branch, don't create a merge commit but simply fast-forward branch to the descendant commit. Create an extra merge commit otherwise. This is the default behavior unless merge.fastForward is configured.")
cmd.Flags().Bool("no-ff", false, "always create a merge commit, even when a simple fast-forward is possible. This is the default when merge.fastFoward is set to \"never\".")
cmd.Flags().Bool("ff-only", false, "only allow fast-forward merges. This is the default when merge.fastForward is set to \"only\".")
return cmd
}
func getFastForward(cmd *cobra.Command, c *conf.Config) (conf.FastForward, error) {
defFF, err := cmd.Flags().GetBool("ff")
if err != nil {
return "", err
}
noFF, err := cmd.Flags().GetBool("no-ff")
if err != nil {
return "", err
}
ffOnly, err := cmd.Flags().GetBool("ff-only")
if err != nil {
return "", err
}
ff := c.MergeFastForward()
if defFF {
ff = conf.FF_Default
} else if noFF {
ff = conf.FF_Never
} else if ffOnly {
ff = conf.FF_Only
}
return ff, nil
}
func runMerge(
cmd *cobra.Command, c *conf.Config, db objects.Store, rs ref.Store, args []string, noCommit, noGUI bool,
ff conf.FastForward, commitCSV string, numWorkers int, message string, pk []string,
) error {
name, sum, _, err := ref.InterpretCommitName(db, rs, args[0], true)
if err != nil {
return err
}
if !strings.HasPrefix(name, "heads/") {
return fmt.Errorf("%q is not a branch name", args[0])
}
commits := [][]byte{sum}
commitNames := []string{displayableCommitName(args[0], sum)}
for _, s := range args[1:] {
_, sum, _, err := ref.InterpretCommitName(db, rs, s, true)
if err != nil {
return err
}
commits = append(commits, sum)
commitNames = append(commitNames, displayableCommitName(s, sum))
}
baseCommit, err := ref.SeekCommonAncestor(db, commits...)
if err != nil {
return err
}
nonAncestralCommits := [][]byte{}
for _, sum := range commits {
if !bytes.Equal(sum, baseCommit) {
nonAncestralCommits = append(nonAncestralCommits, sum)
}
}
if len(nonAncestralCommits) == 0 {
cmd.Println("All commits are identical, nothing to merge")
return nil
} else if len(nonAncestralCommits) == 1 {
if ff == conf.FF_Never {
com, err := objects.GetCommit(db, nonAncestralCommits[0])
if err != nil {
return err
}
return createMergeCommit(cmd, db, rs, commitNames, com.Table, commits, message, c)
}
err = ref.SaveRef(rs, name, nonAncestralCommits[0], c.User.Name, c.User.Email, "merge", "fast-forward")
if err != nil {
return err
}
cmd.Printf("Fast forward to %s\n", hex.EncodeToString(nonAncestralCommits[0])[:7])
return nil
} else if ff == conf.FF_Only {
return fmt.Errorf("merge rejected (non-fast-forward)")
}
commits = nonAncestralCommits
baseSum, baseT, err := getTable(db, baseCommit)
if err != nil {
return err
}
otherTs := make([]*objects.Table, len(commits))
otherSums := make([][]byte, len(commits))
for i, sum := range commits {
otherSums[i], otherTs[i], err = getTable(db, sum)
if err != nil {
return err
}
}
if len(pk) == 0 {
pk = otherTs[0].PrimaryKey()
}
if commitCSV != "" {
file, err := os.Open(commitCSV)
if err != nil {
return err
}
sortPT, blkPT := displayCommitProgress(cmd)
s, err := sorter.NewSorter(0, sortPT)
if err != nil {
return err
}
sum, err := ingest.IngestTable(db, s, file, pk,
ingest.WithNumWorkers(numWorkers),
ingest.WithProgressBar(blkPT),
)
if err != nil {
return err
}
return createMergeCommit(cmd, db, rs, commitNames, sum, commits, message, c)
}
buf, err := diff.BlockBufferWithSingleStore(db, append([]*objects.Table{baseT}, otherTs...))
if err != nil {
return err
}
rowCollector, cleanup, err := merge.CreateRowCollector(db, baseT)
if err != nil {
return err
}
defer cleanup()
merger, err := merge.NewMerger(db, rowCollector, buf, 65*time.Millisecond, baseT, otherTs, baseSum, otherSums)
if err != nil {
return err
}
defer merger.Close()
if noGUI {
return outputConflicts(cmd, db, buf, merger, commitNames, baseCommit, commits)
} else {
cd, merges, err := collectMergeConflicts(cmd, merger)
if err != nil {
return err
}
var removedCols map[int]struct{}
if len(merges) == 0 {
removedCols = map[int]struct{}{}
for _, layer := range cd.Removed {
for col := range layer {
removedCols[int(col)] = struct{}{}
}
}
} else {
removedCols, err = displayMergeApp(cmd, buf, merger, commitNames, commits, baseCommit, cd, merges)
if err != nil {
return err
}
}
if noCommit {
return saveMergeResultToCSV(cmd, merger, removedCols, commits)
} else {
return commitMergeResult(cmd, db, rs, merger, removedCols, numWorkers, commitNames, commits, message, c)
}
}
}
func outputConflicts(cmd *cobra.Command, db objects.Store, buf *diff.BlockBuffer, merger *merge.Merger, commitNames []string, baseSum []byte, commits [][]byte) error {
wd, err := os.Getwd()
if err != nil {
return err
}
filename := mergeCSVName("CONFLICTS", commits)
f, err := os.Create(path.Join(wd, filename))
if err != nil {
return err
}
defer f.Close()
w := csv.NewWriter(f)
baseName := fmt.Sprintf("BASE %s", hex.EncodeToString(baseSum)[:7])
names := make([]string, len(commitNames))
for i, name := range commitNames {
names[i] = fmt.Sprintf("%s (%s)", name, hex.EncodeToString(commits[i])[:7])
}
mc, err := merger.Start()
if err != nil {
return err
}
cd := (<-mc).ColDiff
columns := append([]string{""}, merger.Columns(nil)...)
err = w.Write(columns)
if err != nil {
return err
}
for i, name := range names {
row := make([]string, cd.Len()+1)
row[0] = "COLUMNS IN " + name
for j := 1; j < len(row); j++ {
if _, ok := cd.Added[i][uint32(j-1)]; ok {
row[j] = "NEW"
} else if _, ok := cd.Removed[i][uint32(j-1)]; ok {
row[j] = "REMOVED"
}
}
err = w.Write(row)
if err != nil {
return err
}
}
merges := []*merge.Merge{}
for m := range mc {
merges = append(merges, m)
}
// sort to make test stable
sort.SliceStable(merges, func(i, j int) bool {
if merges[i].Base == nil && merges[j].Base != nil {
return true
}
if merges[j].Base == nil && merges[i].Base != nil {
return false
}
return string(merges[i].Base) < string(merges[j].Base)
})
for _, m := range merges {
if m.Base != nil {
blk, off := diff.RowToBlockAndOffset(m.BaseOffset)
row, err := buf.GetRow(0, blk, off)
if err != nil {
return err
}
row = append([]string{baseName}, cd.RearrangeBaseRow(row)...)
err = w.Write(row)
if err != nil {
return err
}
}
for i, sum := range m.Others {
if sum == nil && m.Base != nil {
row := make([]string, cd.Len()+1)
row[0] = names[i]
txt := fmt.Sprintf("REMOVED IN %s", hex.EncodeToString(commits[i])[:7])
for j := 1; j < len(row); j++ {
row[j] = txt
}
err = w.Write(row)
if err != nil {
return err
}
} else if sum != nil {
blk, off := diff.RowToBlockAndOffset(m.OtherOffsets[i])
row, err := buf.GetRow(byte(i+1), blk, off)
if err != nil {
return err
}
row = cd.RearrangeRow(i, row)
row = append([]string{names[i]}, row...)
err = w.Write(row)
if err != nil {
return err
}
}
}
if len(m.ResolvedRow) > 0 {
row := append([]string{"RESOLUTION"}, m.ResolvedRow...)
err = w.Write(row)
if err != nil {
return err
}
}
err = merger.SaveResolvedRow(m.PK, nil)
if err != nil {
return err
}
}
if err = merger.Error(); err != nil {
return err
}
rc, _, err := merger.SortedRows(nil)
if err != nil {
return err
}
for blk := range rc {
for _, row := range blk.Rows {
row = append([]string{""}, row...)
err = w.Write(row)
if err != nil {
return err
}
}
}
err = merger.Error()
if err != nil {
return err
}
w.Flush()
err = f.Close()
if err != nil {
return err
}
cmd.Printf("saved conflicts to file %s\n", filename)
return nil
}
func mergeCSVName(prefix string, commits [][]byte) string {
sums := make([]string, len(commits))
for i, b := range commits {
sums[i] = hex.EncodeToString(b)[:7]
}
return fmt.Sprintf("%s_%s.csv", prefix, strings.Join(sums, "_"))
}
func saveMergeResultToCSV(cmd *cobra.Command, merger *merge.Merger, removedCols map[int]struct{}, commits [][]byte) error {
wd, err := os.Getwd()
if err != nil {
return err
}
name := path.Join(wd, mergeCSVName("MERGE", commits))
f, err := os.Create(name)
if err != nil {
return err
}
defer f.Close()
w := csv.NewWriter(f)
defer w.Flush()
err = w.Write(merger.Columns(removedCols))
if err != nil {
return err
}
blocks, _, err := merger.SortedRows(removedCols)
if err != nil {
return err
}
bar := pbar(-1, fmt.Sprintf("saving merge result to %s", name), cmd.OutOrStdout(), cmd.ErrOrStderr())
for blk := range blocks {
for _, row := range blk.Rows {
err = w.Write(row)
if err != nil {
return err
}
err = bar.Add(1)
if err != nil {
return err
}
}
}
return bar.Finish()
}
func displayCommitProgress(cmd *cobra.Command) (sortPT, blkPT *progressbar.ProgressBar) {
sortPT = pbar(-1, "sorting", cmd.OutOrStdout(), cmd.OutOrStderr())
blkPT = pbar(-1, "saving blocks", cmd.OutOrStdout(), cmd.OutOrStderr())
return
}
func commitMergeResult(
cmd *cobra.Command,
db objects.Store,
rs ref.Store,
merger *merge.Merger,
removedCols map[int]struct{},
numWorkers int,
commitNames []string,
commits [][]byte,
message string,
c *conf.Config,
) error {
columns := merger.Columns(removedCols)
pk, err := slice.KeyIndices(columns, merger.PK())
if err != nil {
return err
}
blocks, rowsCount, err := merger.SortedBlocks(removedCols)
if err != nil {
return err
}
blkPT := pbar(-1, "saving blocks", cmd.OutOrStdout(), cmd.OutOrStderr())
s, err := sorter.NewSorter(0, nil)
if err != nil {
return err
}
sum, err := ingest.IngestTableFromBlocks(db, s, columns, pk, rowsCount, blocks,
ingest.WithNumWorkers(numWorkers),
ingest.WithProgressBar(blkPT),
)
if err != nil {
return err
}
tbl, err := objects.GetTable(db, sum)
if err != nil {
return err
}
if err = ingest.ProfileTable(db, sum, tbl); err != nil {
return err
}
return createMergeCommit(cmd, db, rs, commitNames, sum, commits, message, c)
}
func createMergeCommit(cmd *cobra.Command, db objects.Store, rs ref.Store, commitNames []string, sum []byte, parents [][]byte, message string, c *conf.Config) error {
if message == "" {
quotedNames := []string{}
for _, name := range commitNames[1:] {
quotedNames = append(quotedNames, fmt.Sprintf("%q", name))
}
message = fmt.Sprintf("Merge %s into %q", strings.Join(quotedNames, ", "), commitNames[0])
}
commit := &objects.Commit{
Table: sum,
Message: message,
Time: time.Now(),
AuthorEmail: c.User.Email,
AuthorName: c.User.Name,
Parents: parents,
}
buf := bytes.NewBuffer(nil)
_, err := commit.WriteTo(buf)
if err != nil {
return err
}
commitSum, err := objects.SaveCommit(db, buf.Bytes())
if err != nil {
return err
}
err = ref.CommitMerge(rs, commitNames[0], commitSum, commit)
if err != nil {
return err
}
cmd.Printf("[%s %s] %s\n", commitNames[0], hex.EncodeToString(commitSum)[:7], message)
return nil
}
func redrawEvery(app *tview.Application, d time.Duration) (cancel func()) {
drawCtx, cancel := context.WithCancel(context.Background())
ticker := time.NewTicker(d)
go func() {
for {
select {
case <-ticker.C:
app.Draw()
case <-drawCtx.Done():
return
}
}
}()
return cancel
}
func collectMergeConflicts(cmd *cobra.Command, merger *merge.Merger) (*diff.ColDiff, []*merge.Merge, error) {
|
func displayMergeApp(cmd *cobra.Command, buf *diff.BlockBuffer, merger *merge.Merger, commitNames []string, commitSums [][]byte, baseSum []byte, cd *diff.ColDiff, merges []*merge.Merge) (map[int]struct{}, error) {
app := tview.NewApplication()
mergeApp := widgets.NewMergeApp(buf, merger, app, commitNames, commitSums, baseSum)
mergeApp.InitializeTable(cd, merges)
app.SetRoot(mergeApp.Flex, true).
SetFocus(mergeApp.Table).
SetBeforeDrawFunc(func(screen tcell.Screen) bool {
mergeApp.BeforeDraw(screen)
return false
}).
EnableMouse(true)
cancel := redrawEvery(app, 65*time.Millisecond)
defer cancel()
err := app.Run()
if err != nil {
return nil, err
}
if !mergeApp.Finished {
cmd.Println("merge aborted")
os.Exit(0)
}
return mergeApp.RemovedCols, nil
}
|
var bar *progressbar.ProgressBar
mch, err := merger.Start()
if err != nil {
return nil, nil, err
}
pch := merger.Progress.Start()
merges := []*merge.Merge{}
mainLoop:
for {
select {
case p := <-pch:
if bar == nil {
bar = pbar(p.Total, "collecting merge conflicts", cmd.OutOrStdout(), cmd.OutOrStderr())
}
bar.Set64(p.Progress)
case m, ok := <-mch:
if !ok {
break mainLoop
}
merges = append(merges, m)
}
}
merger.Progress.Stop()
if bar != nil {
if err = bar.Finish(); err != nil {
return nil, nil, err
}
}
if err = merger.Error(); err != nil {
return nil, nil, err
}
return merges[0].ColDiff, merges[1:], nil
}
|
main.js
|
import 'pixi'
import Phaser from 'phaser'
import BootState from './states/Boot.js'
import InGameState from './states/InGameState.js'
import Level from './managers/level.js'
var game = new Phaser.Game( 5000, 5000, Phaser.AUTO, '');
var bootState = new BootState( game );
var inGameState = new InGameState( game );
var level = new Level( game )
|
game.state.add( 'level', level )
game.state.start( 'BootState' )
|
game.state.add( 'BootState', bootState )
game.state.add( 'InGameState', inGameState )
|
storage-put.go
|
// Copyright 2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"crypto/rand"
"fmt"
"os"
"runtime/pprof"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/spf13/cobra"
"github.com/coreos/etcd/lease"
)
// storagePutCmd represents a storage put performance benchmarking tool
var storagePutCmd = &cobra.Command{
Use: "put",
Short: "Benchmark put performance of storage",
Run: storagePutFunc,
}
var (
totalNrKeys int
storageKeySize int
valueSize int
txn bool
)
func init() {
storageCmd.AddCommand(storagePutCmd)
storagePutCmd.Flags().IntVar(&totalNrKeys, "total", 100, "a total number of keys to put")
storagePutCmd.Flags().IntVar(&storageKeySize, "key-size", 64, "a size of key (Byte)")
storagePutCmd.Flags().IntVar(&valueSize, "value-size", 64, "a size of value (Byte)")
storagePutCmd.Flags().BoolVar(&txn, "txn", false, "put a key in transaction or not")
// TODO: after the PR https://github.com/spf13/cobra/pull/220 is merged, the below pprof related flags should be moved to RootCmd
storagePutCmd.Flags().StringVar(&cpuProfPath, "cpuprofile", "", "the path of file for storing cpu profile result")
storagePutCmd.Flags().StringVar(&memProfPath, "memprofile", "", "the path of file for storing heap profile result")
}
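// For illustration only: with the flags registered above, a run of this benchmark
// could look like the following (the binary name is an assumption):
//
//	benchmark storage put --total 10000 --key-size 64 --value-size 256 --txn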
func createBytesSlice(bytesN, sliceN int) [][]byte {
rs := make([][]byte, sliceN)
for i := range rs {
rs[i] = make([]byte, bytesN)
if _, err := rand.Read(rs[i]); err != nil {
panic(err)
}
}
return rs
}
func storagePutFunc(cmd *cobra.Command, args []string) {
if cpuProfPath != "" {
f, err := os.Create(cpuProfPath)
if err != nil {
fmt.Fprintln(os.Stderr, "Failed to create a file for storing cpu profile result: ", err)
os.Exit(1)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fmt.Fprintln(os.Stderr, "Failed to start cpu profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
}
if memProfPath != "" {
f, err := os.Create(memProfPath)
if err != nil {
fmt.Fprintln(os.Stderr, "Failed to create a file for storing heap profile result: ", err)
os.Exit(1)
}
defer func() {
err := pprof.WriteHeapProfile(f)
if err != nil {
fmt.Fprintln(os.Stderr, "Failed to write heap profile result: ", err)
				// nothing more we can do to handle the error
}
}()
}
keys := createBytesSlice(storageKeySize, totalNrKeys)
vals := createBytesSlice(valueSize, totalNrKeys)
latencies := make([]time.Duration, totalNrKeys)
minLat := time.Duration(1<<63 - 1)
maxLat := time.Duration(0)
|
for i := 0; i < totalNrKeys; i++ {
begin := time.Now()
if txn {
id := s.TxnBegin()
if _, err := s.TxnPut(id, keys[i], vals[i], lease.NoLease); err != nil {
fmt.Fprintln(os.Stderr, "txn put error:", err)
os.Exit(1)
}
s.TxnEnd(id)
} else {
s.Put(keys[i], vals[i], lease.NoLease)
}
end := time.Now()
lat := end.Sub(begin)
latencies[i] = lat
if maxLat < lat {
maxLat = lat
}
if lat < minLat {
minLat = lat
}
}
total := time.Duration(0)
for _, lat := range latencies {
total += lat
}
fmt.Printf("total: %v\n", total)
fmt.Printf("average: %v\n", total/time.Duration(totalNrKeys))
fmt.Printf("rate: %4.4f\n", float64(totalNrKeys)/total.Seconds())
fmt.Printf("minimum latency: %v\n", minLat)
fmt.Printf("maximum latency: %v\n", maxLat)
// TODO: Currently this benchmark doesn't use the common histogram infrastructure.
// This is because the accuracy of that infrastructure isn't suitable for measuring
// performance of kv storage:
// https://github.com/coreos/etcd/pull/4070#issuecomment-167954149
}
| |
messages_tl.py
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "",
"(active)": "",
"Also available in:": "",
"Archive": "",
"Categories": "",
"Comments": "",
"LANGUAGE": "Ingles",
"Languages:": "Mga Wika:",
"More posts about %s": "",
"Newer posts": "",
"Next post": "Susunod",
"No posts found.": "",
"Nothing found.": "",
"Older posts": "",
"Original site": "",
"Posted:": "",
"Posts about %s": "",
"Posts for year %s": "",
"Posts for {month} {day}, {year}": "",
"Posts for {month} {year}": "",
"Previous post": "",
"Publication date": "",
"RSS feed": "",
"Read in English": "",
"Read more": "",
"Skip to main content": "",
"Source": "",
"Subcategories:": "",
"Tags and Categories": "",
"Tags": "Mga Tag",
"Write your page here.": "",
"Write your post here.": "",
"old posts, page %d": "",
|
"page %d": "",
}
| |
yambo.py
|
# -*- coding: utf-8 -*-
"""
Plugin to create a Yambo input file and run a calculation with the yambo executable.
"""
from __future__ import absolute_import
import os
import six
from aiida.engine import CalcJob
from aiida_quantumespresso.calculations import _lowercase_dict, _uppercase_dict
from aiida.common.datastructures import CalcInfo
from aiida.common.datastructures import CalcJobState
from aiida.common.exceptions import UniquenessError, InputValidationError, ValidationError
from aiida.common.utils import classproperty
from aiida.orm import Code
from aiida.orm.nodes import Dict
from aiida.orm.nodes import RemoteData, BandsData, ArrayData
from aiida.plugins import DataFactory, CalculationFactory
from aiida.common import AIIDA_LOGGER
from aiida.common import LinkType
from aiida_yambo.utils.common_helpers import *
PwCalculation = CalculationFactory('quantumespresso.pw')
__authors__ = " Miki Bonacci (miki.bonacci@unimore.it)," \
" Gianluca Prandini (gianluca.prandini@epfl.ch)," \
" Antimo Marrazzo (antimo.marrazzo@epfl.ch)," \
" Michael Atambo (michaelontita.atambo@unimore.it)."
class YamboCalculation(CalcJob):
"""
AiiDA plugin for the Yambo code.
For more information, refer to http://www.yambo-code.org/
https://github.com/yambo-code/yambo-aiida and http://aiida-yambo.readthedocs.io/en/latest/
"""
# Default input and output files
_DEFAULT_INPUT_FILE = 'aiida.in'
_DEFAULT_OUTPUT_FILE = 'aiida.out'
@classmethod
def define(cls,spec):
super(YamboCalculation, cls).define(spec)
spec.input('metadata.options.input_filename', valid_type=six.string_types, default=cls._DEFAULT_INPUT_FILE)
spec.input('metadata.options.output_filename', valid_type=six.string_types, default=cls._DEFAULT_OUTPUT_FILE)
# Default output parser provided by AiiDA
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='yambo.yambo')
# self._SCRATCH_FOLDER = 'SAVE'
spec.input('metadata.options.scratch_folder', valid_type=six.string_types, default='SAVE')
spec.input('metadata.options.logostring', valid_type=six.string_types, default="""
#
# Y88b / e e e 888~~\ ,88~-_
# Y88b / d8b d8b d8b 888 | d888 \
# Y88b/ /Y88b d888bdY88b 888 _/ 88888 |
# Y8Y / Y88b / Y88Y Y888b 888 \ 88888 |
# Y /____Y88b / YY Y888b 888 | Y888 /
# / / Y88b / Y888b 888__/ `88_-~
#
# AIIDA input plugin. YAMBO 4.x compatible
# http://www.yambo-code.org
#
"""
)
spec.input('settings',valid_type=Dict,
help='Use an additional node for special settings')
spec.input('parameters',valid_type=Dict,
help='Use a node that specifies the input parameters')
spec.input('parent_folder',valid_type=RemoteData,
                    help='Use a remote folder as parent folder (for restarts and similar)')
spec.input('preprocessing_code',valid_type=Code,
help='Use a preprocessing code for starting yambo',required=False)
spec.input('precode_parameters',valid_type=Dict,
help='Use a node that specifies the input parameters for the yambo precode',required=False)
spec.input('code',valid_type=Code,
help='Use a main code for yambo calculation')
spec.exit_code(500, 'ERROR_NO_RETRIEVED_FOLDER',
message='The retrieved folder data node could not be accessed.')
spec.exit_code(501, 'WALLTIME_ERROR',
message='time exceeded the max walltime')
spec.exit_code(502, 'NO_SUCCESS',
message='failed calculation for some reason: could be a low number of conduction bands')
spec.exit_code(503, 'PARSER_ANOMALY',
message='Unexpected behavior of YamboFolder')
spec.exit_code(504, 'PARA_ERROR',
message='parallelization error')
spec.exit_code(505, 'MEMORY_ERROR',
message='general memory error')
spec.exit_code(506, 'X_par_MEMORY_ERROR',
message='x_par allocation memory error')
#outputs definition:
spec.output('output_parameters', valid_type=Dict,
required=True, help='returns the output parameters')
spec.output('array_alpha', valid_type=ArrayData,
required=False, help='returns the alpha array')
spec.output('array_alpha_bands', valid_type=ArrayData,
required=False, help='returns the alpha array bands')
spec.output('array_alpha_array', valid_type=ArrayData,
required=False, help='returns the alpha array')
spec.output('bands_quasiparticle', valid_type=BandsData,
required=False, help='returns the quasiparticle band structure')
spec.output('array_qp', valid_type=ArrayData,
required=False, help='returns the quasiparticle array band structure')
spec.output('array_eels', valid_type=ArrayData,
required=False, help='returns the eels array')
spec.output('array_eps', valid_type=ArrayData,
required=False, help='returns the eps array')
spec.output('array_ndb', valid_type=ArrayData,
required=False, help='returns the array for ndb')
spec.output('array_ndb_QP', valid_type=ArrayData,
required=False, help='returns the array for ndbQP')
spec.output('array_ndb_HFlocXC', valid_type=ArrayData,
required=False, help='returns the array ndb for HFlocXC')
spec.output('system_info', valid_type=Dict,
required=False, help='returns some system information after a p2y')
def prepare_for_submission(self, tempfolder):
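        # Runlevel flags that may appear in the input parameters, mapped to the
        # database files they produce; used further below to build the retrieve list.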
_dbs_accepted = {'gw0': 'ndb.QP', 'HF_and_locXC': 'ndb.HF_and_locXC',}
local_copy_list = []
remote_copy_list = []
remote_symlink_list = []
        # Settings can be undefined; they default to an empty dictionary.
        # They will be used for any input that doesn't fit elsewhere.
settings = self.inputs.settings.get_dict()
initialise = settings.pop('INITIALISE', None)
if initialise is not None:
if not isinstance(initialise, bool):
                raise InputValidationError("INITIALISE must be a boolean")
copy_save = settings.pop('COPY_SAVE', None)
if copy_save is not None:
if not isinstance(copy_save, bool):
                raise InputValidationError("COPY_SAVE must be a boolean")
copy_dbs = settings.pop('COPY_DBS', None)
if copy_dbs is not None:
if not isinstance(copy_dbs, bool):
                raise InputValidationError("COPY_DBS must be a boolean")
restart_yambo = settings.pop('RESTART_YAMBO', None)
if restart_yambo is not None:
if not isinstance(restart_yambo, bool):
                raise InputValidationError("RESTART_YAMBO must be a boolean")
parameters = self.inputs.parameters
if not initialise:
if not isinstance(parameters, Dict):
raise InputValidationError("parameters is not of type Dict")
parent_calc_folder = self.inputs.parent_folder
main_code = self.inputs.code
preproc_code = self.inputs.preprocessing_code
parent_calc = take_calc_from_remote(parent_calc_folder)
if parent_calc.process_type=='aiida.calculations:yambo.yambo':
yambo_parent=True
else:
yambo_parent=False
# flags for yambo interfaces
try:
precode_param_dict = self.inputs.precode_parameters
except:
precode_param_dict = Dict(dict={})
# check the precode parameters given in input
input_cmdline = settings.pop('CMDLINE', None)
import re
precode_params_list = [] #['cd aiida.save'] ##.format(parent_calc_folder._PREFIX)
pattern = re.compile(r"(^\-)([a-zA-Z])")
for key, value in six.iteritems(precode_param_dict.get_dict()):
if re.search(pattern, key) is not None:
if key == '-O' or key == '-H' or key == '-h' or key == '-F':
raise InputValidationError(
"Precode flag {} is not allowed".format(str(key)))
else:
if precode_param_dict[key] is True:
precode_params_list.append(str(key))
elif precode_param_dict[key] is False:
pass
else:
precode_params_list.append('{}'.format(str(key)))
precode_params_list.append('{}'.format(str(value)))
else:
raise InputValidationError(
"Wrong format of precode_parameters")
# Adding manual cmdline input (e.g. for DB fragmentation)
if input_cmdline is not None:
precode_params_list = precode_params_list + input_cmdline
# TODO: check that remote data must be on the same computer
##############################
# END OF INITIAL INPUT CHECK #
##############################
if not initialise:
###################################################
# Prepare yambo input file
###################################################
params_dict = parameters.get_dict()
# extract boolean keys
boolean_dict = {
k: v
for k, v in six.iteritems(params_dict) if isinstance(v, bool)
}
params_dict = {
k: v
for k, v in six.iteritems(params_dict)
if k not in list(boolean_dict.keys())
}
# reorganize the dictionary and create a list of dictionaries with key, value and units
parameters_list = []
for k, v in six.iteritems(params_dict):
if "_units" in k:
continue
units_key = "{}_units".format(k)
try:
units = params_dict[units_key]
except KeyError:
units = None
this_dict = {}
this_dict['key'] = k
this_dict['value'] = v
this_dict['units'] = units
parameters_list.append(this_dict)
input_filename = tempfolder.get_abs_path(self.metadata.options.input_filename)
with open(input_filename, 'w') as infile:
infile.write(self.metadata.options.logostring)
for k, v in six.iteritems(boolean_dict):
if v:
infile.write("{}\n".format(k))
for this_dict in parameters_list:
key = this_dict['key']
value = this_dict['value']
units = this_dict['units']
if isinstance(value, list):
value_string = ''
try:
for v in value:
value_string += " | ".join([str(_) for _ in v]) + " |\n"
except:
value_string += " | ".join([str(_) for _ in value]) + " |\n"
the_string = "% {}\n {}".format(key, value_string)
the_string += "%"
else:
the_value = '"{}"'.format(value) if isinstance(
value, six.string_types) else '{}'.format(value)
the_string = "{} = {}".format(key, the_value)
if units is not None:
the_string += " {}".format(units)
infile.write(the_string + "\n")
############################################
# set copy of the parent calculation
############################################
try:
parent_calc = parent_calc_folder.get_incoming().all_nodes()[-1] #to load the node from a workchain...
except:
parent_calc = parent_calc_folder.get_incoming().get_node_by_label('remote_folder')
if yambo_parent:
if copy_save:
try:
remote_copy_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"/SAVE/",'./SAVE/'))
except:
remote_copy_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"out/aiida.save/SAVE/",'./SAVE/'))
else:
try:
remote_symlink_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"/SAVE/",'./SAVE/'))
except:
remote_symlink_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"out/aiida.save/SAVE/",'./SAVE/'))
if copy_dbs:
remote_copy_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"/aiida.out/",'./aiida.out/'))
if restart_yambo:
remote_symlink_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"/aiida.out/",'./aiida.out/'))
else:
remote_copy_list.append(
(parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
PwCalculation._OUTPUT_SUBFOLDER,
"aiida.save","*" ), ##.format(parent_calc_folder._PREFIX)
"."
)
)
############################################
# set Calcinfo
############################################
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
calcinfo.local_copy_list = []
calcinfo.remote_copy_list = remote_copy_list
calcinfo.remote_symlink_list = remote_symlink_list
# Retrieve by default the output file and the xml file
calcinfo.retrieve_list = []
calcinfo.retrieve_list.append('r*')
calcinfo.retrieve_list.append('l*')
calcinfo.retrieve_list.append('o*')
calcinfo.retrieve_list.append('LOG/l*_CPU_1')
calcinfo.retrieve_list.append('LOG/l*_CPU_2')
calcinfo.retrieve_list.append('*stderr*') #standard errors
extra_retrieved = []
if initialise:
# extra_retrieved.append('SAVE/'+_dbs_accepted['ns.db1'])
pass
else:
for dbs in _dbs_accepted.keys():
db = boolean_dict.pop(dbs,False)
if db:
extra_retrieved.append('aiida.out/'+_dbs_accepted[dbs])
additional = settings.pop('ADDITIONAL_RETRIEVE_LIST',[])
if additional:
extra_retrieved.append(additional)
for extra in extra_retrieved:
calcinfo.retrieve_list.append(extra)
from aiida.common.datastructures import CodeRunMode, CodeInfo
        # c1 = interface between DFT codes and yambo (e.g. p2y or a2y)
c1 = CodeInfo()
c1.withmpi = True
c1.cmdline_params = precode_params_list
# c2 = yambo initialization
c2 = CodeInfo()
c2.withmpi = True
c2.cmdline_params = []
c2.code_uuid = main_code.uuid
# if the parent calculation is a yambo calculation skip the interface (c1) and the initialization (c2)
if yambo_parent:
try:
parent_settings = _uppercase_dict(
parent_calc.inputs.settings.get_dict(),
dict_name='parent settings')
parent_initialise = parent_settings['INITIALISE']
except KeyError:
parent_initialise = False
c1 = None
if not parent_initialise:
c2 = None
else:
c1.cmdline_params = precode_params_list
c1.code_uuid = preproc_code.uuid
# c3 = yambo calculation
c3 = CodeInfo()
c3.withmpi = True
#c3.withmpi = self.get_withmpi()
c3.cmdline_params = [
"-F", self.metadata.options.input_filename, \
'-J', self.metadata.options.output_filename, \
]
c3.code_uuid = main_code.uuid
if initialise:
c2 = None
c3 = None
#logic of the execution
#calcinfo.codes_info = [c1, c2, c3] if not yambo_parent else [c3]
if yambo_parent:
if not parent_initialise:
|
calcinfo.codes_info = [c3]
else:
calcinfo.codes_info = [c2, c3]
elif initialise:
calcinfo.codes_info = [c1]
else:
calcinfo.codes_info = [c1, c2, c3]
calcinfo.codes_run_mode = CodeRunMode.SERIAL
if settings:
raise InputValidationError(
"The following keys have been found in "
"the settings input node, but were not understood: {}".format(
",".join(list(settings.keys()))))
return calcinfo
################################################################################
#the following functions are not used
def _check_valid_parent(self, calc):
"""
Check that calc is a valid parent for a YamboCalculation.
It can be a PwCalculation or a YamboCalculation.
"""
try:
if ((not isinstance(calc, PwCalculation))
and (not isinstance(calc, YamboCalculation))):
raise ValueError(
"Parent calculation must be a PwCalculation or a YamboCalculation"
)
except ImportError:
if ((not isinstance(calc, PwCalculation))
and (not isinstance(calc, YamboCalculation))):
raise ValueError(
"Parent calculation must be a PwCalculation or a YamboCalculation"
)
def use_parent_calculation(self, calc):
"""
Set the parent calculation of Yambo,
from which it will inherit the outputsubfolder.
The link will be created from parent RemoteData to YamboCalculation
"""
from aiida.common.exceptions import NotExistent
self._check_valid_parent(calc)
remotedatas = calc.get_outputs(node_type=RemoteData)
if not remotedatas:
raise NotExistent("No output remotedata found in " "the parent")
if len(remotedatas) != 1:
raise UniquenessError("More than one output remotedata found in "
"the parent")
remotedata = remotedatas[0]
self._set_parent_remotedata(remotedata)
def _set_parent_remotedata(self, remotedata):
"""
Used to set a parent remotefolder in the start of Yambo.
"""
if not isinstance(remotedata, RemoteData):
raise ValueError('remotedata must be a RemoteData')
# complain if another remotedata is already found
input_remote = self.get_inputs(node_type=RemoteData)
if input_remote:
            raise ValidationError("Cannot set several parent calculations on a "
                                  "Yambo calculation")
self.use_parent_folder(remotedata)
| |
dep_node.rs
|
//! This module defines the `DepNode` type which the compiler uses to represent
//! nodes in the dependency graph. A `DepNode` consists of a `DepKind` (which
//! specifies the kind of thing it represents, like a piece of HIR, MIR, etc)
//! and a `Fingerprint`, a 128 bit hash value the exact meaning of which
//! depends on the node's `DepKind`. Together, the kind and the fingerprint
//! fully identify a dependency node, even across multiple compilation sessions.
//! In other words, the value of the fingerprint does not depend on anything
//! that is specific to a given compilation session, like an unpredictable
//! interning key (e.g., NodeId, DefId, Symbol) or the numeric value of a
//! pointer. The concept behind this could be compared to how git commit hashes
//! uniquely identify a given commit. This approach has a few advantages:
//!
//! * A `DepNode` can simply be serialized to disk and loaded in another session
//! without the need to do any "rebasing" (like we have to do for Spans and
//! NodeIds) or "retracing" (like we had to do for `DefId` in earlier
//! implementations of the dependency graph).
//! * A `Fingerprint` is just a bunch of bits, which allows `DepNode` to
//! implement `Copy`, `Sync`, `Send`, `Freeze`, etc.
//! * Since we just have a bit pattern, `DepNode` can be mapped from disk into
//! memory without any post-processing (e.g., "abomination-style" pointer
//! reconstruction).
//! * Because a `DepNode` is self-contained, we can instantiate `DepNodes` that
//! refer to things that do not exist anymore. In previous implementations
//! `DepNode` contained a `DefId`. A `DepNode` referring to something that
//! had been removed between the previous and the current compilation session
//! could not be instantiated because the current compilation session
//! contained no `DefId` for the thing that had been removed.
//!
//! `DepNode` definition happens in the `define_dep_nodes!()` macro. This macro
//! defines the `DepKind` enum and a corresponding `DepConstructor` enum. The
//! `DepConstructor` enum links a `DepKind` to the parameters that are needed at
//! runtime in order to construct a valid `DepNode` fingerprint.
//!
//! Because the macro sees what parameters a given `DepKind` requires, it can
//! "infer" some properties for each kind of `DepNode`:
//!
//! * Whether a `DepNode` of a given kind has any parameters at all. Some
//! `DepNode`s, like `Krate`, represent global concepts with only one value.
//! * Whether it is possible, in principle, to reconstruct a query key from a
//! given `DepNode`. Many `DepKind`s only require a single `DefId` parameter,
//! in which case it is possible to map the node's fingerprint back to the
//! `DefId` it was computed from. In other cases, too much information gets
//! lost during fingerprint computation.
//!
//! The `DepConstructor` enum, together with `DepNode::new()` ensures that only
//! valid `DepNode` instances can be constructed. For example, the API does not
//! allow for constructing parameterless `DepNode`s with anything other
//! than a zeroed out fingerprint. More generally speaking, it relieves the
//! user of the `DepNode` API of having to know how to compute the expected
//! fingerprint for a given set of node parameters.
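// For illustration only (the `tcx` and `def_id` bindings below are assumptions,
// not defined in this file): with the API defined further down, a parameterless
// node and a DefId-based node could be built roughly as
//
//     let krate_node = DepNode::new_no_params(DepKind::Krate);
//     let hir_node = tcx.def_path_hash(def_id).to_dep_node(DepKind::Hir);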
use crate::mir;
use crate::mir::interpret::GlobalId;
use crate::hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX};
use crate::hir::map::DefPathHash;
use crate::hir::HirId;
use crate::ich::{Fingerprint, StableHashingContext};
use rustc_data_structures::stable_hasher::{StableHasher, HashStable};
use std::fmt;
use std::hash::Hash;
use syntax_pos::symbol::InternedString;
use crate::traits;
use crate::traits::query::{
CanonicalProjectionGoal, CanonicalTyGoal, CanonicalTypeOpAscribeUserTypeGoal,
CanonicalTypeOpEqGoal, CanonicalTypeOpSubtypeGoal, CanonicalPredicateGoal,
CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpNormalizeGoal,
};
use crate::ty::{self, TyCtxt, ParamEnvAnd, Ty};
use crate::ty::subst::SubstsRef;
// erase!() just makes tokens go away. It's used to specify which macro argument
// is repeated (i.e., which sub-expression of the macro we are in) but we don't need
// to actually use any of the arguments.
macro_rules! erase {
($x:tt) => ({})
}
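// replace!() swaps the matched tokens for whatever follows `with`; below it is used
// to turn a type token into the binding name `arg` when expanding DepNode::new().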
macro_rules! replace {
($x:tt with $($y:tt)*) => ($($y)*)
}
macro_rules! is_anon_attr {
(anon) => (true);
($attr:ident) => (false);
}
macro_rules! is_eval_always_attr {
(eval_always) => (true);
($attr:ident) => (false);
}
macro_rules! contains_anon_attr {
($($attr:ident),*) => ({$(is_anon_attr!($attr) | )* false});
}
macro_rules! contains_eval_always_attr {
($($attr:ident),*) => ({$(is_eval_always_attr!($attr) | )* false});
}
macro_rules! define_dep_nodes {
(<$tcx:tt>
$(
[$($attr:ident),* ]
$variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
$({ $($struct_arg_name:ident : $struct_arg_ty:ty),* })*
,)*
) => (
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash,
RustcEncodable, RustcDecodable)]
pub enum DepKind {
$($variant),*
}
impl DepKind {
#[allow(unreachable_code)]
#[inline]
pub fn can_reconstruct_query_key<$tcx>(&self) -> bool {
match *self {
$(
DepKind :: $variant => {
if contains_anon_attr!($($attr),*) {
return false;
}
// tuple args
$({
return <$tuple_arg_ty as DepNodeParams>
::CAN_RECONSTRUCT_QUERY_KEY;
})*
// struct args
$({
return <( $($struct_arg_ty,)* ) as DepNodeParams>
::CAN_RECONSTRUCT_QUERY_KEY;
})*
true
}
)*
}
}
// FIXME: Make `is_anon`, `is_eval_always` and `has_params` properties
// of queries
#[inline(always)]
pub fn is_anon(&self) -> bool {
match *self {
$(
DepKind :: $variant => { contains_anon_attr!($($attr),*) }
)*
}
}
#[inline(always)]
pub fn is_eval_always(&self) -> bool {
match *self {
$(
DepKind :: $variant => { contains_eval_always_attr!($($attr), *) }
)*
}
}
#[allow(unreachable_code)]
#[inline(always)]
pub fn has_params(&self) -> bool {
match *self {
$(
DepKind :: $variant => {
// tuple args
$({
erase!($tuple_arg_ty);
return true;
})*
// struct args
$({
$(erase!($struct_arg_name);)*
return true;
})*
false
}
)*
}
}
}
pub enum DepConstructor<$tcx> {
$(
$variant $(( $tuple_arg_ty ))*
$({ $($struct_arg_name : $struct_arg_ty),* })*
),*
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash,
RustcEncodable, RustcDecodable)]
pub struct DepNode {
pub kind: DepKind,
pub hash: Fingerprint,
}
impl DepNode {
#[allow(unreachable_code, non_snake_case)]
#[inline(always)]
pub fn new<'a, 'tcx>(tcx: TyCtxt<'tcx>,
dep: DepConstructor<'tcx>)
-> DepNode
                   where 'tcx: 'a
{
match dep {
$(
DepConstructor :: $variant $(( replace!(($tuple_arg_ty) with arg) ))*
$({ $($struct_arg_name),* })*
=>
{
// tuple args
$({
erase!($tuple_arg_ty);
let hash = DepNodeParams::to_fingerprint(&arg, tcx);
let dep_node = DepNode {
kind: DepKind::$variant,
hash
};
if cfg!(debug_assertions) &&
!dep_node.kind.can_reconstruct_query_key() &&
(tcx.sess.opts.debugging_opts.incremental_info ||
tcx.sess.opts.debugging_opts.query_dep_graph)
{
tcx.dep_graph.register_dep_node_debug_str(dep_node, || {
arg.to_debug_str(tcx)
});
}
return dep_node;
})*
// struct args
$({
let tupled_args = ( $($struct_arg_name,)* );
let hash = DepNodeParams::to_fingerprint(&tupled_args,
tcx);
let dep_node = DepNode {
kind: DepKind::$variant,
hash
};
if cfg!(debug_assertions) &&
!dep_node.kind.can_reconstruct_query_key() &&
(tcx.sess.opts.debugging_opts.incremental_info ||
tcx.sess.opts.debugging_opts.query_dep_graph)
{
tcx.dep_graph.register_dep_node_debug_str(dep_node, || {
tupled_args.to_debug_str(tcx)
});
}
return dep_node;
})*
DepNode {
kind: DepKind::$variant,
hash: Fingerprint::ZERO,
}
}
)*
}
}
/// Construct a DepNode from the given DepKind and DefPathHash. This
/// method will assert that the given DepKind actually requires a
/// single DefId/DefPathHash parameter.
#[inline(always)]
pub fn from_def_path_hash(kind: DepKind,
def_path_hash: DefPathHash)
-> DepNode {
debug_assert!(kind.can_reconstruct_query_key() && kind.has_params());
DepNode {
kind,
hash: def_path_hash.0,
}
}
/// Creates a new, parameterless DepNode. This method will assert
/// that the DepNode corresponding to the given DepKind actually
/// does not require any parameters.
#[inline(always)]
pub fn new_no_params(kind: DepKind) -> DepNode {
debug_assert!(!kind.has_params());
DepNode {
kind,
hash: Fingerprint::ZERO,
}
}
/// Extracts the DefId corresponding to this DepNode. This will work
/// if two conditions are met:
///
/// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
/// 2. the item that the DefPath refers to exists in the current tcx.
///
/// Condition (1) is determined by the DepKind variant of the
/// DepNode. Condition (2) might not be fulfilled if a DepNode
/// refers to something from the previous compilation session that
/// has been removed.
#[inline]
pub fn extract_def_id(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
if self.kind.can_reconstruct_query_key() {
let def_path_hash = DefPathHash(self.hash);
tcx.def_path_hash_to_def_id.as_ref()?
.get(&def_path_hash).cloned()
} else {
None
}
}
/// Used in testing
pub fn from_label_string(label: &str,
def_path_hash: DefPathHash)
-> Result<DepNode, ()> {
let kind = match label {
$(
stringify!($variant) => DepKind::$variant,
)*
_ => return Err(()),
};
if !kind.can_reconstruct_query_key() {
return Err(());
}
if kind.has_params() {
Ok(def_path_hash.to_dep_node(kind))
} else {
Ok(DepNode::new_no_params(kind))
}
}
/// Used in testing
pub fn has_label_string(label: &str) -> bool {
match label {
$(
stringify!($variant) => true,
)*
_ => false,
}
}
}
/// Contains variant => str representations for constructing
/// DepNode groups for tests.
#[allow(dead_code, non_upper_case_globals)]
pub mod label_strs {
$(
pub const $variant: &str = stringify!($variant);
)*
}
);
}
impl fmt::Debug for DepNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self.kind)?;
if !self.kind.has_params() && !self.kind.is_anon() {
return Ok(());
}
write!(f, "(")?;
crate::ty::tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx
|
else {
write!(f, "{}", self.hash)?;
}
Ok(())
})?;
write!(f, ")")
}
}
impl DefPathHash {
#[inline(always)]
pub fn to_dep_node(self, kind: DepKind) -> DepNode {
DepNode::from_def_path_hash(kind, self)
}
}
impl DefId {
#[inline(always)]
pub fn to_dep_node(self, tcx: TyCtxt<'_>, kind: DepKind) -> DepNode {
DepNode::from_def_path_hash(kind, tcx.def_path_hash(self))
}
}
rustc_dep_node_append!([define_dep_nodes!][ <'tcx>
// We use this for most things when incr. comp. is turned off.
[] Null,
// Represents the `Krate` as a whole (the `hir::Krate` value) (as
// distinct from the krate module). This is basically a hash of
// the entire krate, so if you read from `Krate` (e.g., by calling
// `tcx.hir().krate()`), we will have to assume that any change
// means that you need to be recompiled. This is because the
// `Krate` value gives you access to all other items. To avoid
// this fate, do not call `tcx.hir().krate()`; instead, prefer
// wrappers like `tcx.visit_all_items_in_krate()`. If there is no
// suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain
// access to the krate, but you must remember to add suitable
// edges yourself for the individual items that you read.
[eval_always] Krate,
// Represents the body of a function or method. The def-id is that of the
// function/method.
[eval_always] HirBody(DefId),
// Represents the HIR node with the given node-id
[eval_always] Hir(DefId),
// Represents metadata from an extern crate.
[eval_always] CrateMetadata(CrateNum),
[eval_always] AllLocalTraitImpls,
[anon] TraitSelect,
[] CompileCodegenUnit(InternedString),
[eval_always] Analysis(CrateNum),
]);
pub trait RecoverKey<'tcx>: Sized {
fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self>;
}
impl RecoverKey<'tcx> for CrateNum {
fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
dep_node.extract_def_id(tcx).map(|id| id.krate)
}
}
impl RecoverKey<'tcx> for DefId {
fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
dep_node.extract_def_id(tcx)
}
}
impl RecoverKey<'tcx> for DefIndex {
fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
dep_node.extract_def_id(tcx).map(|id| id.index)
}
}
trait DepNodeParams<'tcx>: fmt::Debug {
const CAN_RECONSTRUCT_QUERY_KEY: bool;
/// This method turns the parameters of a DepNodeConstructor into an opaque
/// Fingerprint to be used in DepNode.
/// Not all DepNodeParams support being turned into a Fingerprint (they
/// don't need to if the corresponding DepNode is anonymous).
fn to_fingerprint(&self, _: TyCtxt<'tcx>) -> Fingerprint {
panic!("Not implemented. Accidentally called on anonymous node?")
}
fn to_debug_str(&self, _: TyCtxt<'tcx>) -> String {
format!("{:?}", self)
}
}
impl<'tcx, T> DepNodeParams<'tcx> for T
where
T: HashStable<StableHashingContext<'tcx>> + fmt::Debug,
{
default const CAN_RECONSTRUCT_QUERY_KEY: bool = false;
default fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
let mut hcx = tcx.create_stable_hashing_context();
let mut hasher = StableHasher::new();
self.hash_stable(&mut hcx, &mut hasher);
hasher.finish()
}
default fn to_debug_str(&self, _: TyCtxt<'tcx>) -> String {
format!("{:?}", *self)
}
}
impl<'tcx> DepNodeParams<'tcx> for DefId {
const CAN_RECONSTRUCT_QUERY_KEY: bool = true;
fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
tcx.def_path_hash(*self).0
}
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
tcx.def_path_str(*self)
}
}
impl<'tcx> DepNodeParams<'tcx> for DefIndex {
const CAN_RECONSTRUCT_QUERY_KEY: bool = true;
fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
tcx.hir().definitions().def_path_hash(*self).0
}
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
tcx.def_path_str(DefId::local(*self))
}
}
impl<'tcx> DepNodeParams<'tcx> for CrateNum {
const CAN_RECONSTRUCT_QUERY_KEY: bool = true;
fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
let def_id = DefId {
krate: *self,
index: CRATE_DEF_INDEX,
};
tcx.def_path_hash(def_id).0
}
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
tcx.crate_name(*self).as_str().to_string()
}
}
impl<'tcx> DepNodeParams<'tcx> for (DefId, DefId) {
const CAN_RECONSTRUCT_QUERY_KEY: bool = false;
// We actually would not need to specialize the implementation of this
// method but it's faster to combine the hashes than to instantiate a full
// hashing context and stable-hashing state.
fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
let (def_id_0, def_id_1) = *self;
let def_path_hash_0 = tcx.def_path_hash(def_id_0);
let def_path_hash_1 = tcx.def_path_hash(def_id_1);
def_path_hash_0.0.combine(def_path_hash_1.0)
}
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
let (def_id_0, def_id_1) = *self;
format!("({}, {})",
tcx.def_path_debug_str(def_id_0),
tcx.def_path_debug_str(def_id_1))
}
}
impl<'tcx> DepNodeParams<'tcx> for HirId {
const CAN_RECONSTRUCT_QUERY_KEY: bool = false;
// We actually would not need to specialize the implementation of this
// method but it's faster to combine the hashes than to instantiate a full
// hashing context and stable-hashing state.
fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
let HirId {
owner,
local_id,
} = *self;
let def_path_hash = tcx.def_path_hash(DefId::local(owner));
let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into());
def_path_hash.0.combine(local_id)
}
}
/// A "work product" corresponds to a `.o` (or other) file that we
/// save in between runs. These IDs do not have a `DefId` but rather
/// some independent path or string that persists between runs without
/// the need to be mapped or unmapped. (This ensures we can serialize
/// them even in the absence of a tcx.)
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash,
RustcEncodable, RustcDecodable)]
pub struct WorkProductId {
hash: Fingerprint
}
impl WorkProductId {
pub fn from_cgu_name(cgu_name: &str) -> WorkProductId {
let mut hasher = StableHasher::new();
cgu_name.len().hash(&mut hasher);
cgu_name.hash(&mut hasher);
WorkProductId {
hash: hasher.finish()
}
}
pub fn from_fingerprint(fingerprint: Fingerprint) -> WorkProductId {
WorkProductId {
hash: fingerprint
}
}
}
impl_stable_hash_for!(struct crate::dep_graph::WorkProductId {
hash
});
|
{
if let Some(def_id) = self.extract_def_id(tcx) {
write!(f, "{}", tcx.def_path_debug_str(def_id))?;
} else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*self) {
write!(f, "{}", s)?;
} else {
write!(f, "{}", self.hash)?;
}
}
|
counters.rs
|
use std::fmt;
use heim_common::prelude::*;
use heim_common::units::{information, Information};
use super::bindings::{if_msghdr2, net_pf_route};
pub struct IoCounters {
name: String,
data: if_msghdr2,
}
impl IoCounters {
pub fn interface(&self) -> &str {
self.name.as_str()
}
pub fn bytes_sent(&self) -> Information {
Information::new::<information::byte>(self.data.ifm_data.ifi_obytes)
}
pub fn bytes_recv(&self) -> Information {
Information::new::<information::byte>(self.data.ifm_data.ifi_ibytes)
}
pub fn packets_sent(&self) -> u64 {
self.data.ifm_data.ifi_opackets
}
pub fn packets_recv(&self) -> u64 {
self.data.ifm_data.ifi_ipackets
}
pub fn errors_sent(&self) -> u64 {
self.data.ifm_data.ifi_oerrors
}
pub fn errors_recv(&self) -> u64 {
self.data.ifm_data.ifi_ierrors
}
pub fn drop_recv(&self) -> u64 {
self.data.ifm_data.ifi_iqdrops
}
}
impl fmt::Debug for IoCounters {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("IoCounters")
.field("name", &self.name)
.finish()
}
}
pub async fn io_counters() -> Result<impl Stream<Item = Result<IoCounters>>> {
let interfaces = unsafe { net_pf_route()? };
let interfaces = interfaces.map(|msg| {
let mut name: [u8; libc::IF_NAMESIZE] = [0; libc::IF_NAMESIZE];
let result = unsafe {
libc::if_indextoname(msg.ifm_index.into(), name.as_mut_ptr() as *mut libc::c_char)
};
if result.is_null() {
return Err(Error::last_os_error().with_ffi("if_indextoname"));
}
        let first_nul = name.iter().position(|c| *c == b'\0').unwrap_or(name.len());
let name = String::from_utf8_lossy(&name[..first_nul]).to_string();
Ok(IoCounters { name, data: msg })
|
}
|
});
Ok(stream::iter(interfaces))
|
simple.rs
|
use std::cell::RefCell;
use std::rc::Rc;
use wasmuri_container::*;
use wasmuri_core::*;
use wasmuri_text::*;
use super::*;
pub struct SimpleTextRenderController {
region: TextRegionProps,
agent: Option<Weak<RefCell<ComponentAgent>>>,
text_model: TextModel,
colors: TextColors
}
impl SimpleTextRenderController {
pub fn new(text: &str, font: &Rc<Font>, region: TextRegionProps, colors: TextColors) -> SimpleTextRenderController {
SimpleTextRenderController {
region,
agent: None,
text_model: Rc::clone(font).create_text_model(text),
colors
}
}
pub fn celled(text: &str, font: &Rc<Font>, region: TextRegionProps, colors: TextColors) -> Rc<RefCell<SimpleTextRenderController>> {
Rc::new(RefCell::new(Self::new(text, font, region, colors)))
}
pub fn tuple(text: &str, font: &Rc<Font>, region: TextRegionProps, colors: TextColors) -> (Rc<RefCell<dyn ComponentBehavior>>, Rc<RefCell<dyn TextRenderController>>) {
let instance = Rc::new(RefCell::new(Self::new(text, font, region, colors)));
(Rc::clone(&instance) as Rc<RefCell<dyn ComponentBehavior>>, instance)
}
// TODO Update render opacity if necessary!
pub fn set_fill_color(&mut self, new_color: Color, agent: &mut ComponentAgent){
self.colors.fill_color = new_color;
agent.request_render();
}
pub fn set_stroke_color(&mut self, new_color: Color, agent: &mut ComponentAgent){
self.colors.stroke_color = new_color;
agent.request_render();
}
pub fn set_background_color(&mut self, new_color: Color, agent: &mut ComponentAgent){
self.colors.background_color = new_color;
agent.request_render();
}
pub fn set_colors(&mut self, new_colors: TextColors, agent: &mut ComponentAgent){
self.colors = new_colors;
agent.request_render();
}
}
impl ComponentBehavior for SimpleTextRenderController {
fn attach(&mut self, agent: &mut dyn LayerAgent){
agent.claim_render_space(self.region.get_max_region(), RenderTrigger::Request, determine_render_opacity(vec![self.colors]),
RenderPhase::Text).expect("Should have render space for SimpleTextRenderHelper");
}
fn set_agent(&mut self, agent: Weak<RefCell<ComponentAgent>>){
self.agent = Some(agent);
}
fn get_agent(&self) -> &Weak<RefCell<ComponentAgent>> {
self.agent.as_ref().expect("Agent should have been set by now")
}
fn render(&mut self, params: &mut RenderParams) -> BehaviorRenderResult {
let region = self.get_current_region();
if self.region.should_clear_remaining(&self.text_model, params) {
self.text_model.get_font().fill_rect(self.get_max_region(), self.colors.background_color);
}
self.text_model.render(region.get_float_min_x(), region.get_float_min_y(), region.get_float_height(), self.colors);
BehaviorRenderResult::without_cursor(vec![PassedRenderAction::new(region)])
}
fn get_cursor(&mut self, _params: &mut CursorParams) -> Option<Cursor> {
None
}
}
impl TextRenderController for SimpleTextRenderController {
fn get_max_region(&self) -> Region {
|
fn get_current_region(&self) -> Region {
self.region.get_current_region(&self.text_model)
}
fn set_text(&mut self, new_text: &str){
self.text_model = Rc::clone(self.text_model.get_font()).create_text_model(new_text);
self.agent.as_ref().expect("Agent should have been set by now").upgrade().expect("Component agent should not have been dropped").borrow_mut().request_render();
}
fn set_text_model(&mut self, new_text: TextModel){
self.text_model = new_text;
self.agent.as_ref().expect("Agent should have been set by now").upgrade().expect("Component agent should not have been dropped").borrow_mut().request_render();
}
}
|
self.region.get_max_region()
}
|
platform.rs
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::{lldb_pid_t, sys, SBError, SBLaunchInfo};
use std::ffi::CStr;
/// A platform that can represent the current host or a
/// remote host debug platform.
///
/// The `SBPlatform` class represents the current host, or a remote host.
/// It can be connected to a remote platform in order to provide ways
/// to remotely launch and attach to processes, upload/download files,
/// create directories, run remote shell commands, find locally cached
/// versions of files from the remote system, and much more.
///
/// `SBPlatform` objects can be created and then used to connect to a remote
/// platform which allows the `SBPlatform` to be used to get a list of the
/// current processes on the remote host, attach to one of those processes,
/// install programs on the remote system, attach and launch processes,
/// and much more.
///
/// Every [`SBTarget`] has a corresponding `SBPlatform`. The platform can be
/// specified upon target creation, or the [currently selected] platform
/// will attempt to be used when creating the target automatically as long
/// as the currently selected platform matches the target architecture
/// and executable type. If the architecture or executable type do not match,
/// a suitable platform will be found automatically.
///
/// [`SBTarget`]: crate::SBTarget
/// [currently selected]: crate::SBDebugger::selected_platform
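// For illustration only: a typical flow (the `debugger` and `launch_info` values
// are assumptions, not defined in this file) takes the currently selected platform
// and launches a process on it:
//
//     let platform = debugger.selected_platform();
//     platform.launch(&launch_info)?;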
#[derive(Debug)]
pub struct SBPlatform {
/// The underlying raw `SBPlatformRef`.
pub raw: sys::SBPlatformRef,
}
impl SBPlatform {
/// Construct a new `Some(SBPlatform)` or `None`.
pub fn maybe_wrap(raw: sys::SBPlatformRef) -> Option<SBPlatform> {
if unsafe { sys::SBPlatformIsValid(raw) } {
Some(SBPlatform { raw })
} else {
None
}
}
/// Check whether or not this is a valid `SBPlatform` value.
pub fn is_valid(&self) -> bool {
unsafe { sys::SBPlatformIsValid(self.raw) }
}
/// The working directory for this platform.
pub fn
|
(&self) -> &str {
unsafe {
match CStr::from_ptr(sys::SBPlatformGetWorkingDirectory(self.raw)).to_str() {
Ok(s) => s,
_ => panic!("Invalid string?"),
}
}
}
/// The name of the platform.
///
/// When debugging on the host platform, this would be `"host"`.
pub fn name(&self) -> &str {
unsafe {
match CStr::from_ptr(sys::SBPlatformGetName(self.raw)).to_str() {
Ok(s) => s,
_ => panic!("Invalid string?"),
}
}
}
/// The triple used to describe this platform.
///
/// An example value might be `"x86_64-apple-macosx"`.
pub fn triple(&self) -> &str {
unsafe {
match CStr::from_ptr(sys::SBPlatformGetTriple(self.raw)).to_str() {
Ok(s) => s,
_ => panic!("Invalid string?"),
}
}
}
/// The hostname for this platform.
pub fn hostname(&self) -> &str {
unsafe {
match CStr::from_ptr(sys::SBPlatformGetHostname(self.raw)).to_str() {
Ok(s) => s,
_ => panic!("Invalid string?"),
}
}
}
    /// The build ID for the platform's OS version.
pub fn os_build(&self) -> &str {
unsafe {
match CStr::from_ptr(sys::SBPlatformGetOSBuild(self.raw)).to_str() {
Ok(s) => s,
_ => panic!("Invalid string?"),
}
}
}
/// The long form description of the platform's OS version.
///
/// On macOS, this might look like `"Darwin Kernel Version 20.5.0:
/// Sat May 8 05:10:33 PDT 2021; root:xnu-7195.121.3~9/RELEASE_X86_64"`.
pub fn os_description(&self) -> &str {
unsafe {
match CStr::from_ptr(sys::SBPlatformGetOSDescription(self.raw)).to_str() {
Ok(s) => s,
_ => panic!("Invalid string?"),
}
}
}
/// The major component of the platform's OS version.
///
/// On macOS 10.15.4, this would have the value `10`.
pub fn os_major_version(&self) -> u32 {
unsafe { sys::SBPlatformGetOSMajorVersion(self.raw) }
}
/// The minor component of the platform's OS version.
///
/// On macOS 10.15.4, this would have the value `15`.
pub fn os_minor_version(&self) -> u32 {
unsafe { sys::SBPlatformGetOSMinorVersion(self.raw) }
}
/// The patch or update component of the platform's OS version.
///
/// On macOS 10.15.4, this would have the value `4`.
pub fn os_update_version(&self) -> u32 {
unsafe { sys::SBPlatformGetOSUpdateVersion(self.raw) }
}
/// Launch a process. This is not for debugging that process.
pub fn launch(&self, launch_info: &SBLaunchInfo) -> Result<(), SBError> {
let error = SBError::from(unsafe { sys::SBPlatformLaunch(self.raw, launch_info.raw) });
if error.is_success() {
Ok(())
} else {
Err(error)
}
}
/// Kill a process.
pub fn kill(&self, pid: lldb_pid_t) -> Result<(), SBError> {
let error = SBError::from(unsafe { sys::SBPlatformKill(self.raw, pid) });
if error.is_success() {
Ok(())
} else {
Err(error)
}
}
}
impl Clone for SBPlatform {
fn clone(&self) -> SBPlatform {
SBPlatform {
raw: unsafe { sys::CloneSBPlatform(self.raw) },
}
}
}
impl Drop for SBPlatform {
fn drop(&mut self) {
unsafe { sys::DisposeSBPlatform(self.raw) };
}
}
impl From<sys::SBPlatformRef> for SBPlatform {
fn from(raw: sys::SBPlatformRef) -> SBPlatform {
SBPlatform { raw }
}
}
unsafe impl Send for SBPlatform {}
unsafe impl Sync for SBPlatform {}
#[cfg(feature = "graphql")]
graphql_object!(SBPlatform: crate::SBDebugger | &self | {
field is_valid() -> bool {
self.is_valid()
}
field working_directory() -> &str {
self.working_directory()
}
field name() -> &str {
self.name()
}
field triple() -> &str {
self.triple()
}
field hostname() -> &str {
self.hostname()
}
field os_build() -> &str {
self.os_build()
}
field os_description() -> &str {
self.os_description()
}
// TODO(bm) This should be u32
field os_major_version() -> i32 {
self.os_major_version() as i32
}
// TODO(bm) This should be u32
field os_minor_version() -> i32 {
self.os_minor_version() as i32
}
// TODO(bm) This should be u32
field os_update_version() -> i32 {
self.os_update_version() as i32
}
});
|
working_directory
|
regex_validaktor_test.go
|
package validaktor
import (
"regexp"
"testing"
)
type testRegex struct {
exp string
isValid bool
err error
data interface{}
}
func TestRegexValidate(t *testing.T) {
testData := []testRegex{
{exp: "[A-Z]+", isValid: true, err: nil, data: "HELLO"},
{exp: "[0-9]{4,6}", isValid: true, err: nil, data: "12345"},
{exp: "\\w+", isValid: true, err: nil, data: "whatever24"},
{exp: `\w+`, isValid: true, err: nil, data: "iamgood"},
{exp: "[^A-Z]+", isValid: true, err: nil, data: "123456asdf"},
}
for _, v := range testData {
		validator := &regexValidator{regex: regexp.MustCompile(v.exp)}
isValid, err := validator.validate(v.data)
if v.isValid != isValid {
t.Errorf("%+v != %+v it should be valid with data %+v", v.isValid, isValid, v.data)
}
if err != v.err {
t.Errorf("there was an error %s", err)
}
}
}
func TestRegexValidateKo(t *testing.T)
|
{
testData := []testRegex{
{exp: "[A-Z]+", isValid: false, data: "1234"},
{exp: "^[0-9]{4,6}$", isValid: false, data: "123456789"},
{exp: "^\\w+$", isValid: false, data: "whate ver24"},
{exp: `^\w+$`, isValid: false, data: " iamg ood"},
{exp: "[^A-Z]+", isValid: false, data: "ASDFQWER"},
}
for _, v := range testData {
		validator := &regexValidator{regex: regexp.MustCompile(v.exp)}
isValid, _ := validator.validate(v.data)
if v.isValid != isValid {
t.Errorf("%+v != %+v it should not be valid with data %+v", v.isValid, isValid, v.data)
}
}
}
|
|
model.go
|
// Copyright 2015-2020 Bret Jordan, All rights reserved.
//
// Use of this source code is governed by an Apache 2.0 license that can be
// found in the LICENSE file in the root of the source tree.
package apiroot
import (
"github.com/wxj95/libstix2/objects/properties"
)
// ----------------------------------------------------------------------
// Define Message Type
// ----------------------------------------------------------------------
/*
APIRoot - This type implements the TAXII 2 API Root and defines all of the
properties and methods needed to create and work with this resource. All of the
methods not defined local to this type are inherited from the individual
properties.
The following information comes directly from the TAXII 2.1 specification.
The API Root resource contains general information about the API Root, such as a
human-readable title and description, the TAXII versions it supports, and the
maximum size (max_content_length) of the content body it will accept in a PUT or
POST request.
*/
type APIRoot struct {
properties.TitleProperty
properties.DescriptionProperty
Versions []string `json:"versions"`
MaxContentLength int `json:"max_content_length"`
}
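// For illustration, an APIRoot serializes to JSON roughly as follows (the values
// are made up; title and description come from the embedded property types):
//
//	{
//	  "title": "Example API Root",
//	  "description": "A sample API Root",
//	  "versions": ["application/taxii+json;version=2.1"],
//	  "max_content_length": 104857600
//	}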
// ----------------------------------------------------------------------
// Initialization Functions
// ----------------------------------------------------------------------
/*
New - This function will create a new TAXII API Root resource and return
it as a pointer.
*/
func
|
() *APIRoot {
var obj APIRoot
return &obj
}
|
New
|
site.rs
|
use std::collections::HashMap;
use errors::Result;
/// Get word count and estimated reading time
pub fn get_reading_analytics(content: &str) -> (usize, usize)
|
/// Resolves an internal link (of the `./posts/something.md#hey` sort) to its absolute link
pub fn resolve_internal_link(link: &str, permalinks: &HashMap<String, String>) -> Result<String> {
// First we remove the ./ since that's gutenberg specific
let clean_link = link.replacen("./", "", 1);
// Then we remove any potential anchor
// parts[0] will be the file path and parts[1] the anchor if present
let parts = clean_link.split('#').collect::<Vec<_>>();
match permalinks.get(parts[0]) {
Some(p) => {
if parts.len() > 1 {
Ok(format!("{}#{}", p, parts[1]))
} else {
Ok(p.to_string())
}
},
None => bail!(format!("Relative link {} not found.", link)),
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::{resolve_internal_link, get_reading_analytics};
#[test]
fn can_resolve_valid_internal_link() {
let mut permalinks = HashMap::new();
permalinks.insert("pages/about.md".to_string(), "https://vincent.is/about".to_string());
let res = resolve_internal_link("./pages/about.md", &permalinks).unwrap();
assert_eq!(res, "https://vincent.is/about");
}
#[test]
fn can_resolve_valid_root_internal_link() {
let mut permalinks = HashMap::new();
permalinks.insert("about.md".to_string(), "https://vincent.is/about".to_string());
let res = resolve_internal_link("./about.md", &permalinks).unwrap();
assert_eq!(res, "https://vincent.is/about");
}
#[test]
fn can_resolve_internal_links_with_anchors() {
let mut permalinks = HashMap::new();
permalinks.insert("pages/about.md".to_string(), "https://vincent.is/about".to_string());
let res = resolve_internal_link("./pages/about.md#hello", &permalinks).unwrap();
assert_eq!(res, "https://vincent.is/about#hello");
}
#[test]
fn errors_resolve_inexistant_internal_link() {
let res = resolve_internal_link("./pages/about.md#hello", &HashMap::new());
assert!(res.is_err());
}
#[test]
fn reading_analytics_short_text() {
let (word_count, reading_time) = get_reading_analytics("Hello World");
assert_eq!(word_count, 2);
assert_eq!(reading_time, 0);
}
#[test]
fn reading_analytics_long_text() {
let mut content = String::new();
for _ in 0..1000 {
content.push_str(" Hello world");
}
let (word_count, reading_time) = get_reading_analytics(&content);
assert_eq!(word_count, 2000);
assert_eq!(reading_time, 10);
}
}
|
{
// Only works for Latin-script languages, but good enough for a start
let word_count: usize = content.split_whitespace().count();
// https://help.medium.com/hc/en-us/articles/214991667-Read-time
// 275 seems a bit too high though
(word_count, (word_count / 200))
}
|
main.rs
|
mod crab_alignment;
use std::fs;
fn
|
() -> std::io::Result<()> {
let data = fs::read_to_string("./day_07_puzzle_01/input.txt").unwrap();
let numbers = crab_alignment::parse_numbers(&data);
let min_offset = crab_alignment::min_offset_total_target_brute_force(&numbers);
println!("The minimum amount of fuel to align all of the crabs (at position {}) is {}", min_offset.0, min_offset.1);
Ok(())
}
|
main
|
panoptic_fpn_head.py
|
import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from ..builder import HEADS
from ..utils import ConvUpsample
from .base_semantic_head import BaseSemanticHead
@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
"""PanopticFPNHead used in Panoptic FPN.
Args:
num_classes (int): Number of classes, including all stuff
classes and one thing class.
in_channels (int): Number of channels in the input feature
map.
inner_channels (int): Number of channels in inner features.
start_level (int): The start level of the input features
used in PanopticFPN.
end_level (int): The end level of the used features, the
`end_level`-th layer will not be used.
fg_range (tuple): Range of the foreground classes.
bg_range (tuple): Range of the background classes.
conv_cfg (dict): Dictionary to construct and config
conv layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Use ``GN`` by default.
init_cfg (dict or list[dict], optional): Initialization config dict.
loss_seg (dict): the loss of the semantic head.
"""
def
|
(self,
num_classes,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
fg_range=(1, 80),
bg_range=(81, 133),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1,
loss_weight=1.0)):
super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg)
self.fg_range = fg_range
self.bg_range = bg_range
self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1
self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1
# Used feature layers are [start_level, end_level)
self.start_level = start_level
self.end_level = end_level
self.num_stages = end_level - start_level
self.inner_channels = inner_channels
self.conv_upsample_layers = ModuleList()
for i in range(start_level, end_level):
self.conv_upsample_layers.append(
ConvUpsample(
in_channels,
inner_channels,
num_layers=i if i > 0 else 1,
num_upsample=i if i > 0 else 0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
))
self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1)
def _set_things_to_void(self, gt_semantic_seg):
"""Merge thing classes to one class."""
gt_semantic_seg = gt_semantic_seg.int()
fg_mask = (gt_semantic_seg >= self.fg_range[0]) * (
gt_semantic_seg <= self.fg_range[1])
bg_mask = (gt_semantic_seg >= self.bg_range[0]) * (
gt_semantic_seg <= self.bg_range[1])
new_gt_seg = fg_mask.int() * (self.bg_nums + 1)
new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums,
new_gt_seg)
return new_gt_seg
def loss(self, seg_preds, gt_semantic_seg, label_bias=-1):
"""The loss of PanopticFPN head.
Things classes will be merged to one class in PanopticFPN.
"""
gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)
return super().loss(seg_preds, gt_semantic_seg, label_bias)
def init_weights(self):
super().init_weights()
nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
self.conv_logits.bias.data.zero_()
def forward(self, x):
# the number of subnets must not exceed
# the number of input feature levels.
assert self.num_stages <= len(x)
feats = []
for i, layer in enumerate(self.conv_upsample_layers):
f = layer(x[self.start_level + i])
feats.append(f)
feats = torch.sum(torch.stack(feats, dim=0), dim=0)
seg_preds = self.conv_logits(feats)
out = dict(seg_preds=seg_preds, feats=feats)
return out
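# Hedged usage sketch (added for illustration, not part of the original file): build the
# head with its defaults and run it on dummy FPN features. The tensor sizes and the
# num_classes value below are arbitrary assumptions; the only requirement encoded in
# forward() is that at least `num_stages` feature levels are provided, with spatial sizes
# the ConvUpsample blocks can bring back to the level-0 resolution.
if __name__ == '__main__':
    head = PanopticFPNHead(num_classes=54)  # 53 stuff classes + 1 merged thing class with the default bg_range
    dummy_feats = [torch.rand(1, 256, 64 // 2 ** i, 64 // 2 ** i) for i in range(4)]
    out = head(dummy_feats)
    # out['seg_preds'] -> (1, 54, 64, 64); out['feats'] -> (1, 128, 64, 64)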
|
__init__
|
generated_ldapconfig.go
|
package client
const (
LDAPCONFIG_TYPE = "ldapconfig"
)
type Ldapconfig struct {
Resource
AccessMode string `json:"accessMode,omitempty" yaml:"access_mode,omitempty"`
AllowedIdentities []interface{} `json:"allowedIdentities,omitempty" yaml:"allowed_identities,omitempty"`
ConnectionTimeout int64 `json:"connectionTimeout,omitempty" yaml:"connection_timeout,omitempty"`
Domain string `json:"domain,omitempty" yaml:"domain,omitempty"`
Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
GroupMemberMappingAttribute string `json:"groupMemberMappingAttribute,omitempty" yaml:"group_member_mapping_attribute,omitempty"`
GroupNameField string `json:"groupNameField,omitempty" yaml:"group_name_field,omitempty"`
GroupObjectClass string `json:"groupObjectClass,omitempty" yaml:"group_object_class,omitempty"`
GroupSearchField string `json:"groupSearchField,omitempty" yaml:"group_search_field,omitempty"`
LoginDomain string `json:"loginDomain,omitempty" yaml:"login_domain,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Port int64 `json:"port,omitempty" yaml:"port,omitempty"`
Server string `json:"server,omitempty" yaml:"server,omitempty"`
ServiceAccountPassword string `json:"serviceAccountPassword,omitempty" yaml:"service_account_password,omitempty"`
ServiceAccountUsername string `json:"serviceAccountUsername,omitempty" yaml:"service_account_username,omitempty"`
Tls bool `json:"tls,omitempty" yaml:"tls,omitempty"`
UserDisabledBitMask int64 `json:"userDisabledBitMask,omitempty" yaml:"user_disabled_bit_mask,omitempty"`
UserEnabledAttribute string `json:"userEnabledAttribute,omitempty" yaml:"user_enabled_attribute,omitempty"`
UserLoginField string `json:"userLoginField,omitempty" yaml:"user_login_field,omitempty"`
UserMemberAttribute string `json:"userMemberAttribute,omitempty" yaml:"user_member_attribute,omitempty"`
UserNameField string `json:"userNameField,omitempty" yaml:"user_name_field,omitempty"`
UserObjectClass string `json:"userObjectClass,omitempty" yaml:"user_object_class,omitempty"`
UserSearchField string `json:"userSearchField,omitempty" yaml:"user_search_field,omitempty"`
}
type LdapconfigCollection struct {
Collection
Data []Ldapconfig `json:"data,omitempty"`
}
type LdapconfigClient struct {
rancherClient *RancherClient
}
type LdapconfigOperations interface {
List(opts *ListOpts) (*LdapconfigCollection, error)
Create(opts *Ldapconfig) (*Ldapconfig, error)
Update(existing *Ldapconfig, updates interface{}) (*Ldapconfig, error)
ById(id string) (*Ldapconfig, error)
Delete(container *Ldapconfig) error
}
func
|
(rancherClient *RancherClient) *LdapconfigClient {
return &LdapconfigClient{
rancherClient: rancherClient,
}
}
func (c *LdapconfigClient) Create(container *Ldapconfig) (*Ldapconfig, error) {
resp := &Ldapconfig{}
err := c.rancherClient.doCreate(LDAPCONFIG_TYPE, container, resp)
return resp, err
}
func (c *LdapconfigClient) Update(existing *Ldapconfig, updates interface{}) (*Ldapconfig, error) {
resp := &Ldapconfig{}
err := c.rancherClient.doUpdate(LDAPCONFIG_TYPE, &existing.Resource, updates, resp)
return resp, err
}
func (c *LdapconfigClient) List(opts *ListOpts) (*LdapconfigCollection, error) {
resp := &LdapconfigCollection{}
err := c.rancherClient.doList(LDAPCONFIG_TYPE, opts, resp)
return resp, err
}
func (c *LdapconfigClient) ById(id string) (*Ldapconfig, error) {
resp := &Ldapconfig{}
err := c.rancherClient.doById(LDAPCONFIG_TYPE, id, resp)
if apiError, ok := err.(*ApiError); ok {
if apiError.StatusCode == 404 {
return nil, nil
}
}
return resp, err
}
func (c *LdapconfigClient) Delete(container *Ldapconfig) error {
return c.rancherClient.doResourceDelete(LDAPCONFIG_TYPE, &container.Resource)
}
|
newLdapconfigClient
|
models.py
|
import io
import json
import gzip
from base64 import b64decode
from http.cookies import SimpleCookie
import chardet
import rfc3986
import graphene
import yaml
from requests.structures import CaseInsensitiveDict
from requests.cookies import RequestsCookieJar
from starlette.datastructures import MutableHeaders
from starlette.requests import Request as StarletteRequest
from starlette.responses import Response as StarletteResponse
from urllib.parse import parse_qs
from .status_codes import HTTP_200
from .statics import DEFAULT_ENCODING
class QueryDict(dict):
def __init__(self, query_string):
self.update(parse_qs(query_string))
def __getitem__(self, key):
"""
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
"""
list_ = super().__getitem__(key)
try:
return list_[-1]
except IndexError:
return []
def get(self, key, default=None):
"""
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def _get_list(self, key, default=None, force_list=False):
"""
Return a list of values for the key.
Used internally to manipulate values list. If force_list is True,
return a new copy of values.
"""
try:
values = super().__getitem__(key)
except KeyError:
if default is None:
return []
return default
else:
if force_list:
values = list(values) if values is not None else None
return values
def get_list(self, key, default=None):
"""
Return the list of values for the key. If key doesn't exist, return a
default value.
"""
return self._get_list(key, default, force_list=True)
def items(self):
"""
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def items_list(self):
"""
Yield (key, value) pairs, where value is the full list of values for the key.
"""
yield from super().items()
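# Illustrative behaviour (added sketch, not part of the original module). Given the
# parse_qs semantics above, a QueryDict keeps every value but plain lookups return the
# last one:
#
#   qd = QueryDict("a=1&a=2&b=3")
#   qd["a"]                 # "2"  (last value wins)
#   qd.get_list("a")        # ["1", "2"]
#   qd.get("missing", "x")  # "x"
#   dict(qd.items())        # {"a": "2", "b": "3"}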
# TODO: add slots
class Request:
__slots__ = ["_starlette", "formats", "_headers", "_encoding", "api", "_content"]
def __init__(self, scope, receive, api=None):
self._starlette = StarletteRequest(scope, receive)
self.formats = None
self._encoding = None
self.api = api
self._content = None
headers = CaseInsensitiveDict()
for key, value in self._starlette.headers.items():
headers[key] = value
self._headers = headers
@property
def session(self):
"""The session data, in dict form, from the Request."""
if "Responder-Session" in self.cookies:
data = self.cookies[self.api.session_cookie]
data = self.api._signer.unsign(data)
data = b64decode(data)
return json.loads(data)
return {}
@property
def headers(self):
"""A case-insensitive dictionary, containing all headers sent in the Request."""
return self._headers
@property
def mimetype(self):
return self.headers.get("Content-Type", "")
@property
def method(self):
"""The incoming HTTP method used for the request, lower-cased."""
return self._starlette.method.lower()
@property
def full_url(self):
"""The full URL of the Request, query parameters and all."""
return str(self._starlette.url)
@property
def url(self):
"""The parsed URL of the Request."""
return rfc3986.urlparse(self.full_url)
@property
def cookies(self):
"""The cookies sent in the Request, as a dictionary."""
cookies = RequestsCookieJar()
cookie_header = self.headers.get("Cookie", "")
bc = SimpleCookie(cookie_header)
for k, v in bc.items():
cookies[k] = v
return cookies.get_dict()
@property
def
|
(self):
"""A dictionary of the parsed query parameters used for the Request."""
try:
return QueryDict(self.url.query)
except AttributeError:
return QueryDict("")
@property
async def encoding(self):
"""The encoding of the Request's body. Can be set, manually. Must be awaited."""
# Use the user-set encoding first.
if self._encoding:
return self._encoding
# Then try what's defined by the Request.
elif await self.declared_encoding:
return await self.declared_encoding
# Then, automatically detect the encoding.
else:
return await self.apparent_encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
@property
async def content(self):
"""The Request body, as bytes. Must be awaited."""
if not self._content:
self._content = await self._starlette.body()
return self._content
@property
async def text(self):
"""The Request body, as unicode. Must be awaited."""
return (await self.content).decode(await self.encoding)
@property
async def declared_encoding(self):
if "Encoding" in self.headers:
return self.headers["Encoding"]
@property
async def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library. Must be awaited."""
declared_encoding = await self.declared_encoding
if declared_encoding:
return declared_encoding
else:
return chardet.detect(await self.content)["encoding"]
@property
def is_secure(self):
return self.url.scheme == "https"
def accepts(self, content_type):
"""Returns ``True`` if the incoming Request accepts the given ``content_type``."""
return content_type in self.headers.get("Accept", [])
async def media(self, format=None):
"""Renders incoming json/yaml/form data as Python objects. Must be awaited.
:param format: The name of the format being used. Alternatively accepts a custom callable for the format type.
"""
if format is None:
format = "yaml" if "yaml" in self.mimetype or "" else "json"
format = "form" if "form" in self.mimetype or "" else format
if format in self.formats:
return await self.formats[format](self)
else:
return await format(self)
class Response:
__slots__ = [
"req",
"status_code",
"text",
"content",
"encoding",
"media",
"headers",
"formats",
"cookies",
"session",
]
def __init__(self, req, *, formats):
self.req = req
self.status_code = None #: The HTTP Status Code to use for the Response.
self.text = None #: A unicode representation of the response body.
self.content = None #: A bytes representation of the response body.
self.encoding = DEFAULT_ENCODING
self.media = (
None
) #: A Python object that will be content-negotiated and sent back to the client. Typically, in JSON formatting.
self.headers = (
{}
) #: A Python dictionary of ``{key: value}``, representing the headers of the response.
self.formats = formats
self.cookies = {} #: The cookies set in the Response, as a dictionary
self.session = (
req.session.copy()
) #: The cookie-based session data, in dict form, to add to the Response.
@property
async def body(self):
if self.content:
return (self.content, {})
if self.text:
return (self.text.encode(self.encoding), {"Encoding": self.encoding})
for format in self.formats:
if self.req.accepts(format):
return (await self.formats[format](self, encode=True)), {}
# Default to JSON anyway.
return (
await self.formats["json"](self, encode=True),
{"Content-Type": "application/json"},
)
async def __call__(self, receive, send):
body, headers = await self.body
if self.headers:
headers.update(self.headers)
response = StarletteResponse(
body, status_code=self.status_code, headers=headers
)
await response(receive, send)
|
params
|
spectral.py
|
"""Implements spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.externals import six
from sklearn.utils.arpack import svds
from sklearn.utils.arpack import eigsh
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.extmath import make_nonnegative
from sklearn.utils.extmath import norm
from sklearn.utils.validation import assert_all_finite
from sklearn.utils.validation import check_arrays
from .utils import check_array_ndim
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
|
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if cluster `i`
contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
The bicluster label of each row.
`column_labels_` : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c
                        for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c
                           for c in range(self.n_clusters)])
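# Hedged usage sketch (added for illustration, not part of the original module): fitting
# the co-clustering model on any nonnegative matrix yields one bicluster label per row
# and per column; the matrix shape and n_clusters below are arbitrary.
#
#   model = SpectralCoclustering(n_clusters=3, random_state=0)
#   model.fit(np.random.rand(30, 20))
#   model.row_labels_      # shape (30,), values in {0, 1, 2}
#   model.column_labels_   # shape (20,)
#   model.rows_            # boolean array, shape (3, 30)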
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if cluster `i`
contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
Row partition labels.
`column_labels_` : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{}'. method must be"
" one of {}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack([self.row_labels_ == label
                        for label in range(n_row_clusters)
                        for _ in range(n_col_clusters)])
self.columns_ = np.vstack([self.column_labels_ == label
                           for _ in range(n_row_clusters)
                           for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
|
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{}'. svd_method must be"
" one of {}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X, = check_arrays(X, sparse_format='csr', dtype=np.float64)
check_array_ndim(X)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
|
create_poll.rs
|
//! Create a poll for a specific Twitch channel.
//! [`create-poll`](https://dev.twitch.tv/docs/api/reference#create-poll)
//!
//! # Accessing the endpoint
//!
//! ## Request: [CreatePollRequest]
//!
//! To use this endpoint, construct a [`CreatePollRequest`] with the [`CreatePollRequest::new()`] method.
//!
//! ```rust
//! use twitch_api2::helix::polls::create_poll;
//! let request = create_poll::CreatePollRequest::new();
//! ```
//!
//! ## Body: [CreatePollBody]
//!
//! We also need to provide a body to the request containing what we want to change.
//!
//! ```
//! # use twitch_api2::helix::polls::create_poll;
//! let body = create_poll::CreatePollBody::builder()
//! .broadcaster_id("141981764")
//! .title("Heads or Tails?")
//! .choices(vec![
//! create_poll::NewPollChoice::new("Heads"),
//! create_poll::NewPollChoice::new("Tails"),
//! ])
//! .channel_points_voting_enabled(true)
//! .channel_points_per_vote(100)
//! .duration(1800)
//! .build();
//! ```
//!
//! ## Response: [CreatePollResponse]
//!
//!
//! Send the request to receive the response with [`HelixClient::req_post()`](helix::HelixClient::req_post).
//!
//!
//! ```rust, no_run
//! use twitch_api2::helix::{self, polls::create_poll};
//! # use twitch_api2::client;
//! # #[tokio::main]
//! # async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
//! # let client: helix::HelixClient<'static, client::DummyHttpClient> = helix::HelixClient::default();
//! # let token = twitch_oauth2::AccessToken::new("validtoken".to_string());
//! # let token = twitch_oauth2::UserToken::from_existing(&client, token, None, None).await?;
//! let request = create_poll::CreatePollRequest::builder()
//! .build();
//! let body = create_poll::CreatePollBody::builder()
//! .broadcaster_id("141981764")
//! .title("Heads or Tails?")
//! .choices(vec![create_poll::NewPollChoice::new("Heads"), create_poll::NewPollChoice::new("Tails")])
//! .channel_points_voting_enabled(true)
//! .channel_points_per_vote(100)
//! .duration(1800)
//! .build();
//! let response: create_poll::CreatePollResponse = client.req_post(request, body, &token).await?.data;
//! # Ok(())
//! # }
//! ```
//!
//! You can also get the [`http::Request`] with [`request.create_request(&token, &client_id)`](helix::RequestPost::create_request)
//! and parse the [`http::Response`] with [`CreatePollRequest::parse_response(None, &request.get_uri(), response)`](CreatePollRequest::parse_response)
use super::*;
use helix::RequestPost;
/// Query Parameters for [Create Poll](super::create_poll)
///
/// [`create-poll`](https://dev.twitch.tv/docs/api/reference#create-poll)
#[derive(PartialEq, typed_builder::TypedBuilder, Deserialize, Serialize, Clone, Debug, Default)]
#[non_exhaustive]
pub struct CreatePollRequest {}
impl CreatePollRequest {
/// Create a new [`CreatePollRequest`]
pub fn new() -> Self { Self {} }
}
/// Body Parameters for [Create Poll](super::create_poll)
///
/// [`create-poll`](https://dev.twitch.tv/docs/api/reference#create-poll)
#[derive(PartialEq, typed_builder::TypedBuilder, Deserialize, Serialize, Clone, Debug)]
#[non_exhaustive]
pub struct CreatePollBody {
/// The broadcaster running polls. Provided broadcaster_id must match the user_id in the user OAuth token.
#[builder(setter(into))]
pub broadcaster_id: types::UserId,
/// Question displayed for the poll. Maximum: 60 characters.
#[builder(setter(into))]
pub title: String,
/// Total duration for the poll (in seconds). Minimum: 15. Maximum: 1800.
pub duration: i64,
/// Array of the poll choices. Minimum: 2 choices. Maximum: 5 choices.
pub choices: Vec<NewPollChoice>,
/// Indicates if Bits can be used for voting. Default: false
#[builder(default, setter(into))]
pub bits_voting_enabled: Option<bool>,
/// Number of Bits required to vote once with Bits. Minimum: 0. Maximum: 10000.
#[builder(default, setter(into))]
pub bits_per_vote: Option<i64>,
/// Indicates if Channel Points can be used for voting. Default: false
#[builder(default, setter(into))]
pub channel_points_voting_enabled: Option<bool>,
/// Number of Channel Points required to vote once with Channel Points. Minimum: 0. Maximum: 1000000.
#[builder(default, setter(into))]
pub channel_points_per_vote: Option<i64>,
}
impl helix::private::SealedSerialize for CreatePollBody {}
// FIXME: I'd prefer this to be a Vec<String> on CreatePollBody
/// Choice settings for a poll
#[derive(PartialEq, typed_builder::TypedBuilder, Deserialize, Serialize, Clone, Debug)]
#[non_exhaustive]
pub struct NewPollChoice {
/// Text displayed for the choice. Maximum: 25 characters.
pub title: String,
}
impl NewPollChoice {
/// Create a new [`NewPollChoice`]
pub fn new(title: impl Into<String>) -> Self {
Self {
title: title.into(),
}
}
}
/// Return Values for [Create Poll](super::create_poll)
///
/// [`create-poll`](https://dev.twitch.tv/docs/api/reference#create-poll)
pub type CreatePollResponse = super::Poll;
impl Request for CreatePollRequest {
type Response = CreatePollResponse;
const PATH: &'static str = "polls";
#[cfg(feature = "twitch_oauth2")]
const SCOPE: &'static [twitch_oauth2::Scope] = &[twitch_oauth2::Scope::ChannelManagePolls];
}
impl RequestPost for CreatePollRequest {
type Body = CreatePollBody;
fn parse_inner_response(
request: Option<Self>,
uri: &http::Uri,
response_str: &str,
status: http::StatusCode,
) -> Result<helix::Response<Self, Self::Response>, helix::HelixRequestPostError>
where
Self: Sized,
{
let response: helix::InnerResponse<Vec<Self::Response>> =
helix::parse_json(response_str, true).map_err(|e| {
helix::HelixRequestPostError::DeserializeError(
response_str.to_string(),
e,
uri.clone(),
status,
)
})?;
let data = response.data.into_iter().next().ok_or_else(|| {
helix::HelixRequestPostError::InvalidResponse {
reason: "response included no data",
response: response_str.to_string(),
status,
uri: uri.clone(),
}
})?;
Ok(helix::Response {
data,
pagination: response.pagination.cursor,
request,
total: None,
other: None,
})
}
}
#[cfg(test)]
#[test]
fn test_request() {
use helix::*;
let req = CreatePollRequest::builder().build();
let body = CreatePollBody::builder()
.broadcaster_id("141981764")
.title("Heads or Tails?")
.choices(vec![
NewPollChoice::new("Heads"),
NewPollChoice::new("Tails"),
])
.channel_points_voting_enabled(true)
.channel_points_per_vote(100)
.duration(1800)
.build();
dbg!(req.create_request(body, "token", "clientid").unwrap());
// From twitch docs
let data = br##"
{
"data": [
{
"id": "ed961efd-8a3f-4cf5-a9d0-e616c590cd2a",
"broadcaster_id": "141981764",
"broadcaster_name": "TwitchDev",
"broadcaster_login": "twitchdev",
|
"title": "Heads or Tails?",
"choices": [
{
"id": "4c123012-1351-4f33-84b7-43856e7a0f47",
"title": "Heads",
"votes": 0,
"channel_points_votes": 0,
"bits_votes": 0
},
{
"id": "279087e3-54a7-467e-bcd0-c1393fcea4f0",
"title": "Tails",
"votes": 0,
"channel_points_votes": 0,
"bits_votes": 0
}
],
"bits_voting_enabled": false,
"bits_per_vote": 0,
"channel_points_voting_enabled": true,
"channel_points_per_vote": 100,
"status": "ACTIVE",
"duration": 1800,
"started_at": "2021-03-19T06:08:33.871278372Z"
}
]
}
"##
.to_vec();
let http_response = http::Response::builder().status(200).body(data).unwrap();
// This is marked as 204 in twitch docs, but in reality it's 200
let uri = req.get_uri().unwrap();
assert_eq!(uri.to_string(), "https://api.twitch.tv/helix/polls?");
dbg!(CreatePollRequest::parse_response(Some(req), &uri, http_response).unwrap());
}
| |
feed_parse_extractWaterBlog.py
|
def extractWaterBlog(item):
|
'''
Parser for 'water.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
|
model.py
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from pathlib import Path
from typing import Any, Dict, Union
from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, PARAMS_OVERRIDE_KEY, ArmConstants, LONG_URI_FORMAT, AssetTypes
from azure.ai.ml._restclient.v2022_05_01.models import (
ModelContainerData,
ModelVersionDetails,
ModelVersionData,
FlavorData,
)
from azure.ai.ml._schema import ModelSchema
from azure.ai.ml._utils._arm_id_utils import AMLNamedArmId, AMLVersionedArmId
from azure.ai.ml._utils.utils import load_yaml, snake_to_pascal
from azure.ai.ml.entities._assets import Artifact
from .artifact import ArtifactStorageInfo
from azure.ai.ml.entities._util import load_from_dict, get_md5_string
from azure.ai.ml._utils._asset_utils import get_ignore_file, get_object_hash
class
|
(Artifact):
"""Model for training and scoring.
:param name: Name of the resource.
:type name: str
:param version: Version of the resource.
:type version: str
:param type: The storage format for this entity. Used for NCD. Possible values include:
"custom_model", "mlflow_model", "triton_model".
:type type: str
:param utc_time_created: Date and time when the model was created, in
UTC ISO 8601 format. (e.g. '2020-10-19 17:44:02.096572')
:type utc_time_created: str
:param flavors: The flavors in which the model can be interpreted.
(e.g. {sklearn: {sklearn_version: 0.23.2}, python_function: {loader_module: office.plrmodel, python_version: 3.6})
:type flavors: Dict[str, Any]
:param path: A remote uri or a local path pointing at a model.
Example: "azureml://subscriptions/my-sub-id/resourcegroups/my-rg/workspaces/myworkspace/datastores/mydatastore/paths/path_on_datastore/"
:type path: str
:param description: Description of the resource.
:type description: str
:param tags: Tag dictionary. Tags can be added, removed, and updated.
:type tags: dict[str, str]
:param properties: The asset property dictionary.
:type properties: dict[str, str]
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
"""
def __init__(
self,
*,
name: str = None,
version: str = None,
type: str = None,
path: Union[str, PathLike] = None,
utc_time_created: str = None,
flavors: Dict[str, Dict[str, Any]] = None,
description: str = None,
tags: Dict = None,
properties: Dict = None,
**kwargs,
):
self.job_name = kwargs.pop("job_name", None)
super().__init__(
name=name,
version=version,
path=path,
description=description,
tags=tags,
properties=properties,
**kwargs,
)
self.utc_time_created = utc_time_created
self.flavors = dict(flavors) if flavors else None
self._arm_type = ArmConstants.MODEL_VERSION_TYPE
self.type = type or AssetTypes.CUSTOM_MODEL
if self._is_anonymous and self.path:
_ignore_file = get_ignore_file(self.path)
_upload_hash = get_object_hash(self.path, _ignore_file)
self.name = get_md5_string(_upload_hash)
@classmethod
def load(
cls,
path: Union[PathLike, str],
params_override: list = None,
**kwargs,
) -> "Model":
"""Construct a model object from yaml file.
:param path: Path to a local file as the source.
:type path: str
:param params_override: Fields to overwrite on top of the yaml file. Format is [{"field1": "value1"}, {"field2": "value2"}]
:type params_override: list
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
:return: Constructed model object.
:rtype: Model
"""
yaml_dict = load_yaml(path)
return cls._load(data=yaml_dict, yaml_path=path, params_override=params_override, **kwargs)
# For lack of bidirectional map in Python, defining the mapping in two ways in one dictionary
@classmethod
def _load(
cls,
data: Dict = None,
yaml_path: Union[PathLike, str] = None,
params_override: list = None,
**kwargs,
) -> "Model":
params_override = params_override or []
data = data or {}
context = {
BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path("./"),
PARAMS_OVERRIDE_KEY: params_override,
}
return load_from_dict(ModelSchema, data, context, **kwargs)
def _to_dict(self) -> Dict:
return ModelSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
@classmethod
def _from_rest_object(cls, model_rest_object: ModelVersionData) -> "Model":
rest_model_version: ModelVersionDetails = model_rest_object.properties
arm_id = AMLVersionedArmId(arm_id=model_rest_object.id)
flavors = {key: flavor.data for key, flavor in rest_model_version.flavors.items()}
model = Model(
id=model_rest_object.id,
name=arm_id.asset_name,
version=arm_id.asset_version,
path=rest_model_version.model_uri,
description=rest_model_version.description,
tags=rest_model_version.tags,
flavors=flavors,
properties=rest_model_version.properties,
creation_context=model_rest_object.system_data,
type=rest_model_version.model_type,
job_name=rest_model_version.job_name,
)
return model
@classmethod
def _from_container_rest_object(cls, model_container_rest_object: ModelContainerData) -> "Model":
model = Model(
name=model_container_rest_object.name,
version="1",
id=model_container_rest_object.id,
creation_context=model_container_rest_object.system_data,
)
model.latest_version = model_container_rest_object.properties.latest_version
# Setting version to None since if version is not provided it is defaulted to "1".
# This should go away once container concept is finalized.
model.version = None
return model
def _to_rest_object(self) -> ModelVersionData:
model_version = ModelVersionDetails(
description=self.description,
tags=self.tags,
properties=self.properties,
flavors={key: FlavorData(data=dict(value)) for key, value in self.flavors.items()}
if self.flavors
else None, # flatten OrderedDict to dict
model_type=self.type,
model_uri=self.path,
is_anonymous=self._is_anonymous,
)
model_version_resource = ModelVersionData(properties=model_version)
return model_version_resource
def _update_path(self, asset_artifact: ArtifactStorageInfo) -> None:
aml_datastore_id = AMLNamedArmId(asset_artifact.datastore_arm_id)
self.path = LONG_URI_FORMAT.format(
aml_datastore_id.subscription_id,
aml_datastore_id.resource_group_name,
aml_datastore_id.workspace_name,
aml_datastore_id.asset_name,
asset_artifact.relative_path,
)
def _to_arm_resource_param(self, **kwargs):
properties = self._to_rest_object().properties
return {
self._arm_type: {
ArmConstants.NAME: self.name,
ArmConstants.VERSION: self.version,
ArmConstants.PROPERTIES_PARAMETER_NAME: self._serialize.body(properties, "ModelVersionDetails"),
}
}
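# Hedged usage sketch (added for illustration, not part of the original file): the name,
# path and tags below are made-up examples. A Model can be built directly from the
# constructor arguments documented in the class docstring, or loaded from a YAML spec
# via Model.load(path).
if __name__ == "__main__":
    example = Model(
        name="my-model",
        version="1",
        path="./model.pkl",
        type=AssetTypes.CUSTOM_MODEL,
        description="Illustrative model asset",
        tags={"stage": "dev"},
    )
    print(example.name, example.version, example.type)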
|
Model
|
fuse_modules.py
|
import copy
import torch.nn as nn
from torch.quantization.fuser_method_mappings import get_fuser_method
# for backward compatibility
from torch.quantization.fuser_method_mappings import fuse_conv_bn # noqa: F401
from torch.quantization.fuser_method_mappings import fuse_conv_bn_relu # noqa: F401
from typing import List, Optional
# Generalization of getattr
def _get_module(model, submodule_key):
tokens = submodule_key.split('.')
cur_mod = model
for s in tokens:
cur_mod = getattr(cur_mod, s)
return cur_mod
# Generalization of setattr
def _set_module(model, submodule_key, module):
tokens = submodule_key.split('.')
sub_tokens = tokens[:-1]
cur_mod = model
for s in sub_tokens:
cur_mod = getattr(cur_mod, s)
setattr(cur_mod, tokens[-1], module)
def fuse_known_modules(mod_list, additional_fuser_method_mapping=None):
r"""Returns a list of modules that fuses the operations specified
in the input module list.
Fuses only the following sequence of modules:
conv, bn
conv, bn, relu
conv, relu
linear, bn
linear, relu
For these sequences, the first element in the output module list performs
the fused operation. The rest of the elements are set to nn.Identity()
"""
types = tuple(type(m) for m in mod_list)
fuser_method = get_fuser_method(types, additional_fuser_method_mapping)
if fuser_method is None:
raise NotImplementedError("Cannot fuse modules: {}".format(types))
new_mod : List[Optional[nn.Module]] = [None] * len(mod_list)
fused = fuser_method(*mod_list)
# NOTE: forward hooks not processed in the two following for loops will be lost after the fusion
# Move pre forward hooks of the base module to resulting fused module
for handle_id, pre_hook_fn in mod_list[0]._forward_pre_hooks.items():
fused.register_forward_pre_hook(pre_hook_fn)
del mod_list[0]._forward_pre_hooks[handle_id]
# Move post forward hooks of the last module to resulting fused module
for handle_id, hook_fn in mod_list[-1]._forward_hooks.items():
fused.register_forward_hook(hook_fn)
del mod_list[-1]._forward_hooks[handle_id]
new_mod[0] = fused
for i in range(1, len(mod_list)):
identity = nn.Identity()
identity.training = mod_list[0].training
new_mod[i] = identity
return new_mod
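# Hedged illustration (added, not part of the original module): for one of the supported
# sequences listed above, the fused module replaces the first entry and the remaining
# entries become identities, e.g.
#
#   mods = [nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()]
#   fused = fuse_known_modules(mods)
#   # fused[0] is the fused Conv/BN/ReLU module; fused[1] and fused[2] are nn.Identity()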
def _fuse_modules(model, modules_to_fuse, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
|
def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
r"""Fuses a list of modules into a single module
Fuses only the following sequence of modules:
conv, bn
conv, bn, relu
conv, relu
linear, relu
bn, relu
All other sequences are left unchanged.
For these sequences, replaces the first item in the list
with the fused module, replacing the rest of the modules
with identity.
Args:
model: Model containing the modules to be fused
modules_to_fuse: list of list of module names to fuse. Can also be a list
of strings if there is only a single list of modules to fuse.
inplace: bool specifying if fusion happens in place on the model, by default
a new model is returned
fuser_func: Function that takes in a list of modules and outputs a list of fused modules
of the same length. For example,
fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()]
Defaults to torch.quantization.fuse_known_modules
`fuse_custom_config_dict`: custom configuration for fusion
.. code-block:: python
# Example of fuse_custom_config_dict
fuse_custom_config_dict = {
# Additional fuser_method mapping
"additional_fuser_method_mapping": {
(torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn
},
}
Returns:
model with fused modules. A new copy is created if inplace=False; otherwise the input model is modified in place.
Examples::
>>> m = myModel()
>>> # m is a module containing the sub-modules below
>>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']]
>>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
>>> output = fused_m(input)
>>> m = myModel()
>>> # Alternately provide a single list of modules to fuse
>>> modules_to_fuse = ['conv1', 'bn1', 'relu1']
>>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
>>> output = fused_m(input)
"""
if not inplace:
model = copy.deepcopy(model)
if all(isinstance(module_element, str) for module_element in modules_to_fuse):
# Handle case of modules_to_fuse being a list
_fuse_modules(model, modules_to_fuse, fuser_func, fuse_custom_config_dict)
else:
# Handle case of modules_to_fuse being a list of lists
for module_list in modules_to_fuse:
_fuse_modules(model, module_list, fuser_func, fuse_custom_config_dict)
return model
|
if fuse_custom_config_dict is None:
fuse_custom_config_dict = {}
additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
mod_list = []
for item in modules_to_fuse:
mod_list.append(_get_module(model, item))
# Fuse list of modules
new_mod_list = fuser_func(mod_list, additional_fuser_method_mapping)
# Replace original module list with fused module list
for i, item in enumerate(modules_to_fuse):
_set_module(model, item, new_mod_list[i])
|
engine.py
|
from __future__ import print_function, division
import itertools
import re
import sys
import os
import platform
import numpy as np
import model
from config import config
CLUE_PATTERN = r'^([a-zA-Z]+) ({0})$'
UNLIMITED = "unlimited"
# noinspection PyAttributeOutsideInit
class GameEngine(object):
def __init__(self, seed=None, expert=False, word2vec_models=None):
# Load our word list if necessary.
# TODO: Max length of 11 is hardcoded here and in print_board()
if word2vec_models is None:
word2vec_models = {}
with open(config.word_list) as f:
_words = [line.rstrip().lower().replace(' ', '_') for line in f.readlines()]
self.words = np.array(_words)
# Initialize our word embedding models.
self.models = {k: model.WordEmbedding(w2v) for k, w2v in word2vec_models.items()}
# Initialize random numbers.
self.generator = np.random.RandomState(seed=seed)
# Register expert mode
self.expert = expert
self.unfound_words = (set(), set())
# Useful regular expressions.
if self.expert:
self.valid_clue = re.compile(CLUE_PATTERN.format("[0-9]|" + UNLIMITED))
else:
self.valid_clue = re.compile(CLUE_PATTERN.format("[0-9]"))
def initialize_random_game(self, size=5):
self.size = size
# Shuffle the wordlist.
shuffle = self.generator.choice(
len(self.words), size * size, replace=False)
self.board = self.words[shuffle]
# Specify the layout for this game.
assignments = self.generator.permutation(size * size)
self.owner = np.empty(size * size, int)
self.owner[assignments[0]] = 0 # assassin
self.owner[assignments[1:10]] = 1 # first player: 9 words
self.owner[assignments[10:18]] = 2 # second player: 8 words
self.owner[assignments[18:]] = 3 # bystander: 7 words
self.assassin_word = self.board[self.owner == 0]
# All cards are initially visible.
self.visible = np.ones_like(self.owner, dtype=bool)
self.num_turns = -1
def initialize_from_words(self, initial_words, size=5):
"""
The initial_words parameter should be in the format:
ASSASSIN;TEAM1;TEAM2;NEUTRAL
where each group consists of comma-separated words from the word list.
The total number of words must be <= size * size. Any missing words
are considered to be already covered and neutral.
"""
self.size = size
word_groups = initial_words.split(';')
if len(word_groups) != 4:
raise ValueError('Expected 4 groups separated by semicolon.')
board, owner, visible = [], [], []
for group_index, word_group in enumerate(word_groups):
words = word_group.split(',')
for word in words:
word = word.lower().replace(' ', '_')
if word not in self.words:
raise ValueError('Invalid word "{0}".'.format(word))
if word in board:
raise ValueError('Duplicate word "{0}".'.format(word))
board.append(word)
owner.append(group_index)
visible.append(True)
if len(board) > size * size:
raise ValueError('Too many words. Expected <= {0}.'.format(size * size))
# Add dummy hidden words if necessary.
while len(board) < size * size:
board.append('---')
owner.append(3)
visible.append(False)
self.board = np.array(board)
self.owner = np.array(owner)
self.visible = np.array(visible)
# Perform a random shuffle of the board.
shuffle = self.generator.permutation(size * size)
self.board = self.board[shuffle]
self.owner = self.owner[shuffle]
self.visible = self.visible[shuffle]
self.assassin_word = self.board[self.owner == 0]
self.num_turns = -1
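# Illustrative call (added comment, not in the original file), assuming the six words
# below exist in the configured word list:
#   engine.initialize_from_words('scorpion;berlin,soldier;ring,hollywood;fish', size=5)
# -> 1 assassin word, 2 words for team 1, 2 for team 2, 1 neutral; the remaining 19
#    board slots are filled with hidden '---' placeholders.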
def print_board(self, spymaster=False, clear_screen=True):
if clear_screen:
if platform.system() == 'Windows':
os.system('cls')
else:
print(chr(27) + '[2J')
board = self.board.reshape(self.size, self.size)
owner = self.owner.reshape(self.size, self.size)
visible = self.visible.reshape(self.size, self.size)
for row in range(self.size):
for col in range(self.size):
word = board[row, col]
tag = '#<>-'[owner[row, col]]
if not visible[row, col]:
word = tag * 11
elif not spymaster:
tag = ' '
if not spymaster or owner[row, col] in (0, 1, 2):
word = word.upper()
print('{0}{1:11s} '.format(tag, word), end='')
print('')
def play_computer_spymaster(self, gamma=1.0, verbose=True):
say('Thinking...')
sys.stdout.flush()
# Loop over all permutations of words.
num_words = len(self.player_words)
best_score, saved_clues = [], []
for count in range(max(num_words, 2), 0, -1):
# Multiply similarity scores by this factor for any clue
# corresponding to this many words.
bonus_factor = count ** gamma
for group in itertools.combinations(range(num_words), count):
words = self.player_words[list(group)]
clue, score = self.models[f'{self.player + 1} Master'].get_clue(clue_words=words,
pos_words=self.player_words,
neg_words=np.concatenate((
self.opponent_words,
self.neutral_words)),
veto_words=self.assassin_word)
if clue:
best_score.append(score * bonus_factor)
saved_clues.append((clue, words))
num_clues = len(saved_clues)
order = sorted(range(num_clues), key=lambda k: best_score[k], reverse=True)
if verbose:
self.print_board(spymaster=True)
for i in order[:10]:
clue, words = saved_clues[i]
say(u'{0:.3f} {1} = {2}'.format(best_score[i], ' + '.join([w.upper() for w in words]), clue))
clue, words = saved_clues[order[0]]
self.unfound_words[self.player].update(words)
if self.expert and self._should_say_unlimited(nb_clue_words=len(words)):
return clue, UNLIMITED
else:
return clue, len(words)
def _should_say_unlimited(self, nb_clue_words, threshold_opponent=2):
"""
Announce "unlimited" if :
(1) the opposing team risks winning with their next clue,
(2) and our +1 guess isn't enough to catch up during this clue,
(3) but all the words hinted by the current and previous clues
are enough to catch up and win
"""
return (len(self.opponent_words) <= threshold_opponent # (1)
and nb_clue_words + 1 < len(self.player_words) # (2)
and self.unfound_words[self.player]
== set(self.player_words)) # (3)
def play_human_spymaster(self):
self.print_board(spymaster=True)
while True:
clue = ask('{0} Enter your clue: '.format(self.player_label))
matched = self.valid_clue.match(clue)
if matched:
word, count = matched.groups()
if count != UNLIMITED:
count = int(count)
return word, count
say('Invalid clue, should be WORD COUNT.')
def play_human_team(self, word, count):
num_guesses = 0
while (self.expert and count == UNLIMITED) or num_guesses < count + 1:
self.print_board(clear_screen=(num_guesses == 0))
say(u'{0} your clue is: {1} {2}'.format(self.player_label, word, count))
num_guesses += 1
while True:
guess = ask('{0} enter your guess #{1}: '.format(self.player_label, num_guesses))
guess = guess.strip().lower().replace(' ', '_')
if guess == '':
# Team does not want to make any more guesses.
|
if guess in self.board[self.visible]:
break
say('Invalid guess, should be a visible word.')
loc = np.where(self.board == guess)[0]
self.visible[loc] = False
if guess == self.assassin_word:
say('{0} You guessed the assassin - game over!'.format(self.player_label))
return False
if guess in self.player_words:
self.unfound_words[self.player].discard(guess)
if num_guesses == len(self.player_words):
say('{0} You won!!!'.format(self.player_label))
return False
else:
ask('{0} Congratulations, keep going! (hit ENTER)\n'.format(self.player_label))
else:
if guess in self.opponent_words:
ask('{0} Sorry, word from opposing team! (hit ENTER)\n'.format(self.player_label))
else:
ask('{0} Sorry, bystander! (hit ENTER)\n'.format(self.player_label))
break
return True
def play_computer_team(self, word, count):
num_guesses = 0
say(u'{0} (computer) your clue is: {1} {2}'.format(self.player_label, word, count))
guesses = self.models[f'{self.player + 1} Guesser'].get_closest_board_words_to(word, count, self.player_words)
for guess in guesses:
num_guesses += 1
say(f'Computer guess #{num_guesses}: {guess}')
loc = np.where(self.board == guess)[0]
self.visible[loc] = False
if guess == self.assassin_word:
say('{0} (computer) guessed the assassin - game over!'.format(self.player_label))
return False
if guess in self.player_words:
self.unfound_words[self.player].discard(guess)
if num_guesses == len(self.player_words):
say('{0} (computer) You won!!!'.format(self.player_label))
return False
else:
ask('{0} Congratulations computer, keep going! (hit ENTER)\n'.format(self.player_label))
else:
if guess in self.opponent_words:
ask('{0} Sorry computer, word from opposing team! (hit ENTER)\n'.format(self.player_label))
else:
ask('{0} Sorry computer, bystander! (hit ENTER)\n'.format(self.player_label))
break
return True
def next_turn(self):
self.num_turns += 1
self.player = self.num_turns % 2
self.opponent = (self.player + 1) % 2
self.player_label = '<>'[self.player] * 3
self.player_words = self.board[(self.owner == self.player + 1) & self.visible]
self.opponent_words = self.board[(self.owner == self.opponent + 1) & self.visible]
self.neutral_words = self.board[(self.owner == 3) & self.visible]
def play_turn(self, spymaster='human', team='human'):
self.next_turn()
if spymaster == 'human':
word, count = self.play_human_spymaster()
else:
word, count = self.play_computer_spymaster()
if team == 'human':
ongoing = self.play_human_team(word, count)
else:
ongoing = self.play_computer_team(word, count)
return ongoing
def play_game(self, spymaster1='human', team1='human',
spymaster2='human', team2='human', init=None):
if init is None:
self.initialize_random_game()
else:
self.initialize_from_words(init)
while True:
if not self.play_turn(spymaster1, team1): break
if not self.play_turn(spymaster2, team2): break
def say(message):
print(message + '\n')
def ask(message):
try:
return input(message)
except KeyboardInterrupt:
say('\nBye.')
sys.exit(0)
|
return True
|
fake_forwarding_rules.py
|
#!/usr/bin/env python
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test forwarding rules data."""
from google.cloud.security.common.util import parser
FAKE_FORWARDING_RULE1 = {
"kind": "compute#forwardingRule",
"description": "",
"IPAddress": "10.10.10.1",
"region": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1",
"loadBalancingScheme": "EXTERNAL",
"target": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/targetPools/project1-pool",
"portRange": "80-80",
"IPProtocol": "TCP",
"creationTimestamp": "2017-05-05T12:00:01.000-07:00",
"id": "111111111111",
"selfLink": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/forwardingRules/project1-rule",
"name": "project1-rule"
}
|
"region": "https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1",
"loadBalancingScheme": "EXTERNAL",
"target": "https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1/targetPools/project2-pool",
"portRange": "80-80",
"IPProtocol": "TCP",
"creationTimestamp": "2017-05-05T12:00:01.000-07:00",
"id": "222222222222",
"selfLink": "https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1/forwardingRules/project2-rule",
"name": "project2-rule"
}
FAKE_API_RESPONSE1 = [FAKE_FORWARDING_RULE1]
FAKE_API_RESPONSE2 = [FAKE_FORWARDING_RULE2]
FAKE_PROJECT_FWD_RULES_MAP = {
'project1': [FAKE_FORWARDING_RULE1],
'project2': [FAKE_FORWARDING_RULE2],
}
EXPECTED_LOADABLE_FWD_RULES = [
{'project_id': 'project1',
'description': '',
'ip_address': '10.10.10.1',
'region': 'https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1',
'backend_service': None,
'load_balancing_scheme': 'EXTERNAL',
'target': 'https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/targetPools/project1-pool',
'port_range': '80-80',
'ports': '[]',
'ip_protocol': 'TCP',
'creation_timestamp': '2017-05-05 12:00:01',
'id': '111111111111',
'name': 'project1-rule',
'network': None,
'subnetwork': None,
'raw_forwarding_rule': parser.json_stringify(FAKE_FORWARDING_RULE1),
},
{'project_id': 'project2',
'description': '',
'ip_address': '10.10.10.2',
'region': 'https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1',
'backend_service': None,
'load_balancing_scheme': 'EXTERNAL',
'target': 'https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1/targetPools/project2-pool',
'port_range': '80-80',
'ports': '[]',
'ip_protocol': 'TCP',
'creation_timestamp': '2017-05-05 12:00:01',
'id': '222222222222',
'name': 'project2-rule',
'network': None,
'subnetwork': None,
'raw_forwarding_rule': parser.json_stringify(FAKE_FORWARDING_RULE2),
},
]
|
FAKE_FORWARDING_RULE2 = {
"kind": "compute#forwardingRule",
"description": "",
"IPAddress": "10.10.10.2",
|
b.py
|
#!/usr/bin/python
import sys
class InvalidWeightError(Exception):
def __init__(self, needed, actual):
super(InvalidWeightError, self).__init__()
self.needed = needed
self.actual = actual
class Program(object):
def __init__(self, desc):
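# Parses a line of the form "name (weight)" or "name (weight) -> child1, child2, ...".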
super(Program, self).__init__()
i = desc.find(' ')
self.name = desc[0:i]
i = desc.find('(',i+1)
j = desc.find(')',i+1)
self.weight = int(desc[i+1:j])
i = desc.find('-> ', j+1)
if i == -1:
self.children = set()
else:
self.children = set(desc[i+3:].split(', '))
def get_weight(self, cache):
if self.children:
w = [(c, c.get_weight(cache))
for c in [cache[x] for x in self.children]]
w0 = w[0][1]
p0 = w[0][0]
a = 1
b = 0
for i in xrange(1, len(w)):
if w[i][1] == w0:
a = a + 1
else:
w1 = w[i][1]
p1 = w[i][0]
b = b + 1
if len(w) == a:
return a * w0 + self.weight
elif a == 1:
raise InvalidWeightError(w1 - w0 + p0.weight, p0.weight)
else:
raise InvalidWeightError(w0 - w1 + p1.weight, p1.weight)
else:
return self.weight
def programs(filename):
with open(filename, 'r') as f:
for l in f:
line = l.strip()
if line:
yield Program(line)
def main(filename):
cache = {}
candidates = []
children = set()
for prog in programs(filename):
cache[prog.name] = prog
if prog.children:
candidates.append(prog.name)
children = children | prog.children
|
root = cache[c]
break
try:
root.get_weight(cache)
except InvalidWeightError as e:
print('Got {} needed {}'.format(e.actual, e.needed))
else:
print('Did not find error')
if __name__ == '__main__':
if len(sys.argv) > 1:
main(sys.argv[1])
else:
main('input.txt')
|
for c in candidates:
if c not in children:
|
trait-bounds-not-on-bare-trait.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo {
fn dummy(&self) { }
}
// This should emit the less confusing error, not the more confusing one.
fn foo(_x: Foo + Send)
|
fn main() { }
|
{
//~^ ERROR the trait `std::marker::Sized` is not implemented
}
|
fabfile.py
|
#!/usr/bin/env python
import os
from fabric.api import * # noqa
from fabric.colors import green, yellow
from fabric.contrib.console import confirm
from prettyprint import pp
import re
VERSION_PATTERN = r'^v\d+(\.\d+)+?$'
env.releases_directory = "release"
env.root_dir = os.path.abspath(os.path.dirname(__file__))
env.release = "HEAD"
proxy = os.environ.get('http_proxy', None)
env.http_proxy = env.http_proxy_port = None
if proxy is not None:
env.http_proxy, env.http_proxy_port = proxy.rsplit(":")
def latest_git_tag():
description = local('git describe master', capture=True).rstrip('\n')
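# 'git describe' prints e.g. 'v1.4.2' when master sits exactly on a tag, or
# 'v1.4.2-14-g2414721' when commits follow the tag; keep only the leading tag part.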
if '-' in description:
|
else:
latest_tag = description
if not re.match(VERSION_PATTERN, latest_tag):
latest_tag = None
return latest_tag
def compare_versions(x, y):
"""
Expects 2 strings in the format of 'X.Y.Z' where X, Y and Z are
integers. It will compare the items which will organize things
properly by their major, minor and bugfix version.
::
>>> my_list = ['v1.13', 'v1.14.2', 'v1.14.1', 'v1.9', 'v1.1']
>>> sorted(my_list, cmp=compare_versions)
['v1.1', 'v1.9', 'v1.13', 'v1.14.1', 'v1.14.2']
"""
def version_to_tuple(version):
# Trim off the leading v
version_list = version[1:].split('.', 2)
if len(version_list) <= 3:
[version_list.append(0) for _ in range(3 - len(version_list))]
try:
return tuple((int(version) for version in version_list))
except ValueError: # not an integer, so it goes to the bottom
return (0, 0, 0)
x_major, x_minor, x_bugfix = version_to_tuple(x)
y_major, y_minor, y_bugfix = version_to_tuple(y)
return (cmp(x_major, y_major) or cmp(x_minor, y_minor)
or cmp(x_bugfix, y_bugfix))
def make_tag():
if confirm(yellow("Tag this release?"), default=True):
print(green("The last 5 tags were: "))
tags = local('git tag | tail -n 20', capture=True)
pp(sorted(tags.split('\n'), compare_versions, reverse=True))
prompt("New release tag in the format vX.Y[.Z]?", 'tag',
validate=VERSION_PATTERN)
local('git tag -as %(tag)s' % env)
local('git push origin', capture=True)
local('git push --tags origin', capture=True)
local('git fetch --tags origin', capture=True)
@task
def release():
make_tag()
@task
def sequence_diagrams():
with lcd("docs/sequences"):
local("make")
def release_descriptor(path):
with lcd(path):
return local('git describe HEAD', capture=True).rstrip("\n")
|
latest_tag = description[:description.find('-')]
|
hardyvm.py
|
# Configuration for Joe's machines
"""Copyright (c) 2005-2018, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
petsc_2_2_path = None
petsc_2_3_path = '../../petsc-2.3.2-p10/'
petsc_3_0_path = None
petsc_build_name = 'linux-gnu'
petsc_build_name_optimized = 'linux-gnu-opt'
petsc_build_name_production = 'linux-intel-opt-mkl'
|
icpc = 'icpc'
other_includepaths = ['../../hdf5/include',
'../../xsd-2.3.1-i686-linux-gnu/libxsd', parmetis_path]
other_libpaths = [os.path.join(petsc_2_3_path, 'externalpackages/f2cblaslapack/linux-gnu-opt/'),
'../../hdf5/lib', parmetis_path]
blas_lapack = ['f2clapack', 'f2cblas']
blas_lapack_production = ['mkl_lapack', 'mkl', 'svml']
other_libraries = ['boost_serialization', 'boost_filesystem', 'xerces-c', 'hdf5', 'z', 'parmetis', 'metis']
tools = {'mpirun': '../../mpi/bin/mpirun',
'mpicxx': '../../mpi/bin/mpicxx',
'xsd': '../../xsd-2.3.1-i686-linux-gnu/bin/xsd'}
use_vtk = True
if use_vtk:
other_libraries.extend(['vtkGraphics', 'vtkFiltering', 'vtkIO', 'vtkCommon', 'z'])
other_includepaths.extend(['/usr/include/vtk-5.0/'])
do_inf_tests = 1
|
dealii_path = None
parmetis_path = '../../ParMetis-3.1'
intel_path = ''
|
mod.rs
|
//
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use crate::generated::gremlin as pb;
use crate::process::traversal::step::util::StepSymbol;
use enum_dispatch::enum_dispatch;
#[enum_dispatch]
pub trait Step: 'static {
fn get_symbol(&self) -> StepSymbol;
fn get_tags(&self) -> BitSet {
unreachable!()
}
fn get_remove_tags(&self) -> BitSet {
unreachable!()
}
}
impl Step for pb::GremlinStep {
fn get_symbol(&self) -> StepSymbol {
// TODO: return StepSymbol according to different gremlin step
unimplemented!()
}
fn get_tags(&self) -> BitSet {
let mut tags = BitSet::with_capacity(INIT_TAG_NUM);
for step_tag in &self.tags {
let tag = Tag::from_pb(step_tag.clone()).unwrap();
tags.insert(tag as usize);
}
tags
}
fn get_remove_tags(&self) -> BitSet
|
}
mod by_key;
mod filter;
mod flat_map;
mod fold;
mod group_by;
mod map;
mod order_by;
mod sink;
mod source;
mod sub_traversal;
mod traverser_router;
mod util;
use crate::structure::{Tag, INIT_TAG_NUM};
use crate::FromPb;
use bit_set::BitSet;
pub use filter::FilterFuncGen;
pub use flat_map::FlatMapFuncGen;
pub use fold::{AccumFactoryGen, TraverserAccumulator};
pub use group_by::KeyFunctionGen;
pub use map::MapFuncGen;
pub use map::ResultProperty;
pub use order_by::CompareFunctionGen;
pub use sink::TraverserSinkEncoder;
pub use source::graph_step_from;
pub use source::GraphVertexStep;
pub use sub_traversal::TraverserLeftJoinGen;
pub use traverser_router::Router;
pub use util::*;
|
{
let mut tags = BitSet::with_capacity(INIT_TAG_NUM);
for step_tag in &self.remove_tags {
let tag = Tag::from_pb(step_tag.clone()).unwrap();
tags.insert(tag as usize);
}
tags
}
|
useAnimatedToggle.ts
|
import { useRef, useState } from 'react';
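// Usage sketch (illustrative; <Panel> and the class names are hypothetical):
//   const { mounted, startAnimatingOut, toggle, onAnimationEnd } = useAnimatedToggle();
//   {mounted && <Panel className={startAnimatingOut ? 'fade-out' : 'fade-in'} onAnimationEnd={onAnimationEnd} />}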
export default function useAnimatedToggle() {
const [mounted, setMounted] = useState(false);
const [startAnimatingOut, setStartAnimatingOut] = useState(false);
const toggleRef = useRef(null);
const open = () => {
setMounted(true);
};
const close = () => {
setStartAnimatingOut(true);
};
const toggle = () => {
const func = mounted ? close : open;
func();
};
const onAnimationEnd = () => {
if (startAnimatingOut) {
setMounted(false);
setStartAnimatingOut(false);
}
};
return {
close,
|
toggle,
toggleRef,
};
}
|
mounted,
onAnimationEnd,
startAnimatingOut,
|
buildPromotion.js
|
const tl = require('vsts-task-lib/task');
const utils = require('artifactory-tasks-utils');
const cliPromoteCommand = "rt bpr";
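// Resulting command shape (illustrative): <cliPath> rt bpr "<buildDefinition>" "<buildNumber>" "<targetRepo>"
//   --url="<artifactoryUrl>" [--status=... --comment=... --source-repo=... --include-dependencies --copy --dry-run]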
function
|
(cliPath) {
let buildDefinition = tl.getVariable('Build.DefinitionName');
let buildNumber = tl.getVariable('Build.BuildNumber');
// Get input parameters
let artifactoryService = tl.getInput("artifactoryService", false);
let artifactoryUrl = tl.getEndpointUrl(artifactoryService, false);
let targetRepo = tl.getInput("targetRepo", true);
let cliCommand = utils.cliJoin(cliPath, cliPromoteCommand, utils.quote(buildDefinition), utils.quote(buildNumber), utils.quote(targetRepo), "--url=" + utils.quote(artifactoryUrl));
cliCommand = utils.addArtifactoryCredentials(cliCommand, artifactoryService);
cliCommand = utils.addStringParam(cliCommand, "status", "status");
cliCommand = utils.addStringParam(cliCommand, "comment", "comment");
cliCommand = utils.addStringParam(cliCommand, "sourceRepo", "source-repo");
cliCommand = utils.addBoolParam(cliCommand, "includeDependencies", "include-dependencies");
cliCommand = utils.addBoolParam(cliCommand, "copy", "copy");
cliCommand = utils.addBoolParam(cliCommand, "dryRun", "dry-run");
let taskRes = utils.executeCliCommand(cliCommand, process.cwd());
if (taskRes) {
tl.setResult(tl.TaskResult.Failed, taskRes);
} else {
tl.setResult(tl.TaskResult.Succeeded, "Build Succeeded.");
}
}
utils.executeCliTask(RunTaskCbk);
|
RunTaskCbk
|
jquery.min.js
|
/*! jQuery v1.7.1 jquery.com | jquery.org/license */
// ---> http://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js
(function(a,b){function cy(a){return f.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:!1}function cv(a){if(!ck[a]){var b=c.body,d=f("<"+a+">").appendTo(b),e=d.css("display");d.remove();if(e==="none"||e===""){cl||(cl=c.createElement("iframe"),cl.frameBorder=cl.width=cl.height=0),b.appendChild(cl);if(!cm||!cl.createElement)cm=(cl.contentWindow||cl.contentDocument).document,cm.write((c.compatMode==="CSS1Compat"?"<!doctype html>":"")+"<html><body>"),cm.close();d=cm.createElement(a),cm.body.appendChild(d),e=f.css(d,"display"),b.removeChild(cl)}ck[a]=e}return ck[a]}function cu(a,b){var c={};f.each(cq.concat.apply([],cq.slice(0,b)),function(){c[this]=a});return c}function ct(){cr=b}function cs(){setTimeout(ct,0);return cr=f.now()}function cj(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}function ci(){try{return new a.XMLHttpRequest}catch(b){}}function cc(a,c){a.dataFilter&&(c=a.dataFilter(c,a.dataType));var d=a.dataTypes,e={},g,h,i=d.length,j,k=d[0],l,m,n,o,p;for(g=1;g<i;g++){if(g===1)for(h in a.converters)typeof h=="string"&&(e[h.toLowerCase()]=a.converters[h]);l=k,k=d[g];if(k==="*")k=l;else if(l!=="*"&&l!==k){m=l+" "+k,n=e[m]||e["* "+k];if(!n){p=b;for(o in e){j=o.split(" ");if(j[0]===l||j[0]==="*"){p=e[j[1]+" "+k];if(p){o=e[o],o===!0?n=p:p===!0&&(n=o);break}}}}!n&&!p&&f.error("No conversion from "+m.replace(" "," to ")),n!==!0&&(c=n?n(c):p(o(c)))}}return c}function cb(a,c,d){var e=a.contents,f=a.dataTypes,g=a.responseFields,h,i,j,k;for(i in g)i in d&&(c[g[i]]=d[i]);while(f[0]==="*")f.shift(),h===b&&(h=a.mimeType||c.getResponseHeader("content-type"));if(h)for(i in e)if(e[i]&&e[i].test(h)){f.unshift(i);break}if(f[0]in d)j=f[0];else{for(i in d){if(!f[0]||a.converters[i+" "+f[0]]){j=i;break}k||(k=i)}j=j||k}if(j){j!==f[0]&&f.unshift(j);return d[j]}}function ca(a,b,c,d){if(f.isArray(b))f.each(b,function(b,e){c||bE.test(a)?d(a,e):ca(a+"["+(typeof e=="object"||f.isArray(e)?b:"")+"]",e,c,d)});else if(!c&&b!=null&&typeof b=="object")for(var e in b)ca(a+"["+e+"]",b[e],c,d);else d(a,b)}function b_(a,c){var d,e,g=f.ajaxSettings.flatOptions||{};for(d in c)c[d]!==b&&((g[d]?a:e||(e={}))[d]=c[d]);e&&f.extend(!0,a,e)}function b$(a,c,d,e,f,g){f=f||c.dataTypes[0],g=g||{},g[f]=!0;var h=a[f],i=0,j=h?h.length:0,k=a===bT,l;for(;i<j&&(k||!l);i++)l=h[i](c,d,e),typeof l=="string"&&(!k||g[l]?l=b:(c.dataTypes.unshift(l),l=b$(a,c,d,e,l,g)));(k||!l)&&!g["*"]&&(l=b$(a,c,d,e,"*",g));return l}function bZ(a){return function(b,c){typeof b!="string"&&(c=b,b="*");if(f.isFunction(c)){var d=b.toLowerCase().split(bP),e=0,g=d.length,h,i,j;for(;e<g;e++)h=d[e],j=/^\+/.test(h),j&&(h=h.substr(1)||"*"),i=a[h]=a[h]||[],i[j?"unshift":"push"](c)}}}function bC(a,b,c){var d=b==="width"?a.offsetWidth:a.offsetHeight,e=b==="width"?bx:by,g=0,h=e.length;if(d>0){if(c!=="border")for(;g<h;g++)c||(d-=parseFloat(f.css(a,"padding"+e[g]))||0),c==="margin"?d+=parseFloat(f.css(a,c+e[g]))||0:d-=parseFloat(f.css(a,"border"+e[g]+"Width"))||0;return d+"px"}d=bz(a,b,b);if(d<0||d==null)d=a.style[b]||0;d=parseFloat(d)||0;if(c)for(;g<h;g++)d+=parseFloat(f.css(a,"padding"+e[g]))||0,c!=="padding"&&(d+=parseFloat(f.css(a,"border"+e[g]+"Width"))||0),c==="margin"&&(d+=parseFloat(f.css(a,c+e[g]))||0);return d+"px"}function bp(a,b){b.src?f.ajax({url:b.src,async:!1,dataType:"script"}):f.globalEval((b.text||b.textContent||b.innerHTML||"").replace(bf,"/*$0*/")),b.parentNode&&b.parentNode.removeChild(b)}function bo(a){var b=c.createElement("div");bh.appendChild(b),b.innerHTML=a.outerHTML;return b.firstChild}function bn(a){var 
b=(a.nodeName||"").toLowerCase();b==="input"?bm(a):b!=="script"&&typeof a.getElementsByTagName!="undefined"&&f.grep(a.getElementsByTagName("input"),bm)}function bm(a){if(a.type==="checkbox"||a.type==="radio")a.defaultChecked=a.checked}function bl(a){return typeof a.getElementsByTagName!="undefined"?a.getElementsByTagName("*"):typeof a.querySelectorAll!="undefined"?a.querySelectorAll("*"):[]}function bk(a,b){var c;if(b.nodeType===1){b.clearAttributes&&b.clearAttributes(),b.mergeAttributes&&b.mergeAttributes(a),c=b.nodeName.toLowerCase();if(c==="object")b.outerHTML=a.outerHTML;else if(c!=="input"||a.type!=="checkbox"&&a.type!=="radio"){if(c==="option")b.selected=a.defaultSelected;else if(c==="input"||c==="textarea")b.defaultValue=a.defaultValue}else a.checked&&(b.defaultChecked=b.checked=a.checked),b.value!==a.value&&(b.value=a.value);b.removeAttribute(f.expando)}}function bj(a,b){if(b.nodeType===1&&!!f.hasData(a)){var c,d,e,g=f._data(a),h=f._data(b,g),i=g.events;if(i){delete h.handle,h.events={};for(c in i)for(d=0,e=i[c].length;d<e;d++)f.event.add(b,c+(i[c][d].namespace?".":"")+i[c][d].namespace,i[c][d],i[c][d].data)}h.data&&(h.data=f.extend({},h.data))}}function bi(a,b){return f.nodeName(a,"table")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function U(a){var b=V.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}function T(a,b,c){b=b||0;if(f.isFunction(b))return f.grep(a,function(a,d){var e=!!b.call(a,d,a);return e===c});if(b.nodeType)return f.grep(a,function(a,d){return a===b===c});if(typeof b=="string"){var d=f.grep(a,function(a){return a.nodeType===1});if(O.test(b))return f.filter(b,d,!c);b=f.filter(b,d)}return f.grep(a,function(a,d){return f.inArray(a,b)>=0===c})}function S(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function K(){return!0}function J(){return!1}function n(a,b,c){var d=b+"defer",e=b+"queue",g=b+"mark",h=f._data(a,d);h&&(c==="queue"||!f._data(a,e))&&(c==="mark"||!f._data(a,g))&&setTimeout(function(){!f._data(a,e)&&!f._data(a,g)&&(f.removeData(a,d,!0),h.fire())},0)}function m(a){for(var b in a){if(b==="data"&&f.isEmptyObject(a[b]))continue;if(b!=="toJSON")return!1}return!0}function l(a,c,d){if(d===b&&a.nodeType===1){var e="data-"+c.replace(k,"-$1").toLowerCase();d=a.getAttribute(e);if(typeof d=="string"){try{d=d==="true"?!0:d==="false"?!1:d==="null"?null:f.isNumeric(d)?parseFloat(d):j.test(d)?f.parseJSON(d):d}catch(g){}f.data(a,c,d)}else d=b}return d}function h(a){var b=g[a]={},c,d;a=a.split(/\s+/);for(c=0,d=a.length;c<d;c++)b[a[c]]=!0;return b}var c=a.document,d=a.navigator,e=a.location,f=function(){function J(){if(!e.isReady){try{c.documentElement.doScroll("left")}catch(a){setTimeout(J,1);return}e.ready()}}var e=function(a,b){return new e.fn.init(a,b,h)},f=a.jQuery,g=a.$,h,i=/^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,j=/\S/,k=/^\s+/,l=/\s+$/,m=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,n=/^[\],:{}\s]*$/,o=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,p=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,q=/(?:^|:|,)(?:\s*\[)+/g,r=/(webkit)[ \/]([\w.]+)/,s=/(opera)(?:.*version)?[ \/]([\w.]+)/,t=/(msie) ([\w.]+)/,u=/(mozilla)(?:.*? 
rv:([\w.]+))?/,v=/-([a-z]|[0-9])/ig,w=/^-ms-/,x=function(a,b){return(b+"").toUpperCase()},y=d.userAgent,z,A,B,C=Object.prototype.toString,D=Object.prototype.hasOwnProperty,E=Array.prototype.push,F=Array.prototype.slice,G=String.prototype.trim,H=Array.prototype.indexOf,I={};e.fn=e.prototype={constructor:e,init:function(a,d,f){var g,h,j,k;if(!a)return this;if(a.nodeType){this.context=this[0]=a,this.length=1;return this}if(a==="body"&&!d&&c.body){this.context=c,this[0]=c.body,this.selector=a,this.length=1;return this}if(typeof a=="string"){a.charAt(0)!=="<"||a.charAt(a.length-1)!==">"||a.length<3?g=i.exec(a):g=[null,a,null];if(g&&(g[1]||!d)){if(g[1]){d=d instanceof e?d[0]:d,k=d?d.ownerDocument||d:c,j=m.exec(a),j?e.isPlainObject(d)?(a=[c.createElement(j[1])],e.fn.attr.call(a,d,!0)):a=[k.createElement(j[1])]:(j=e.buildFragment([g[1]],[k]),a=(j.cacheable?e.clone(j.fragment):j.fragment).childNodes);return e.merge(this,a)}h=c.getElementById(g[2]);if(h&&h.parentNode){if(h.id!==g[2])return f.find(a);this.length=1,this[0]=h}this.context=c,this.selector=a;return this}return!d||d.jquery?(d||f).find(a):this.constructor(d).find(a)}if(e.isFunction(a))return f.ready(a);a.selector!==b&&(this.selector=a.selector,this.context=a.context);return e.makeArray(a,this)},selector:"",jquery:"1.7.1",length:0,size:function(){return this.length},toArray:function(){return F.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this[this.length+a]:this[a]},pushStack:function(a,b,c){var d=this.constructor();e.isArray(a)?E.apply(d,a):e.merge(d,a),d.prevObject=this,d.context=this.context,b==="find"?d.selector=this.selector+(this.selector?" ":"")+c:b&&(d.selector=this.selector+"."+b+"("+c+")");return d},each:function(a,b){return e.each(this,a,b)},ready:function(a){e.bindReady(),A.add(a);return this},eq:function(a){a=+a;return a===-1?this.slice(a):this.slice(a,a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(F.apply(this,arguments),"slice",F.call(arguments).join(","))},map:function(a){return this.pushStack(e.map(this,function(b,c){return a.call(b,c,b)}))},end:function(){return this.prevObject||this.constructor(null)},push:E,sort:[].sort,splice:[].splice},e.fn.init.prototype=e.fn,e.extend=e.fn.extend=function(){var a,c,d,f,g,h,i=arguments[0]||{},j=1,k=arguments.length,l=!1;typeof i=="boolean"&&(l=i,i=arguments[1]||{},j=2),typeof i!="object"&&!e.isFunction(i)&&(i={}),k===j&&(i=this,--j);for(;j<k;j++)if((a=arguments[j])!=null)for(c in a){d=i[c],f=a[c];if(i===f)continue;l&&f&&(e.isPlainObject(f)||(g=e.isArray(f)))?(g?(g=!1,h=d&&e.isArray(d)?d:[]):h=d&&e.isPlainObject(d)?d:{},i[c]=e.extend(l,h,f)):f!==b&&(i[c]=f)}return i},e.extend({noConflict:function(b){a.$===e&&(a.$=g),b&&a.jQuery===e&&(a.jQuery=f);return e},isReady:!1,readyWait:1,holdReady:function(a){a?e.readyWait++:e.ready(!0)},ready:function(a){if(a===!0&&!--e.readyWait||a!==!0&&!e.isReady){if(!c.body)return setTimeout(e.ready,1);e.isReady=!0;if(a!==!0&&--e.readyWait>0)return;A.fireWith(c,[e]),e.fn.trigger&&e(c).trigger("ready").off("ready")}},bindReady:function(){if(!A){A=e.Callbacks("once memory");if(c.readyState==="complete")return setTimeout(e.ready,1);if(c.addEventListener)c.addEventListener("DOMContentLoaded",B,!1),a.addEventListener("load",e.ready,!1);else if(c.attachEvent){c.attachEvent("onreadystatechange",B),a.attachEvent("onload",e.ready);var b=!1;try{b=a.frameElement==null}catch(d){}c.documentElement.doScroll&&b&&J()}}},isFunction:function(a){return 
e.type(a)==="function"},isArray:Array.isArray||function(a){return e.type(a)==="array"},isWindow:function(a){return a&&typeof a=="object"&&"setInterval"in a},isNumeric:function(a){return!isNaN(parseFloat(a))&&isFinite(a)},type:function(a){return a==null?String(a):I[C.call(a)]||"object"},isPlainObject:function(a){if(!a||e.type(a)!=="object"||a.nodeType||e.isWindow(a))return!1;try{if(a.constructor&&!D.call(a,"constructor")&&!D.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}var d;for(d in a);return d===b||D.call(a,d)},isEmptyObject:function(a){for(var b in a)return!1;return!0},error:function(a){throw new Error(a)},parseJSON:function(b){if(typeof b!="string"||!b)return null;b=e.trim(b);if(a.JSON&&a.JSON.parse)return a.JSON.parse(b);if(n.test(b.replace(o,"@").replace(p,"]").replace(q,"")))return(new Function("return "+b))();e.error("Invalid JSON: "+b)},parseXML:function(c){var d,f;try{a.DOMParser?(f=new DOMParser,d=f.parseFromString(c,"text/xml")):(d=new ActiveXObject("Microsoft.XMLDOM"),d.async="false",d.loadXML(c))}catch(g){d=b}(!d||!d.documentElement||d.getElementsByTagName("parsererror").length)&&e.error("Invalid XML: "+c);return d},noop:function(){},globalEval:function(b){b&&j.test(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(w,"ms-").replace(v,x)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,c,d){var f,g=0,h=a.length,i=h===b||e.isFunction(a);if(d){if(i){for(f in a)if(c.apply(a[f],d)===!1)break}else for(;g<h;)if(c.apply(a[g++],d)===!1)break}else if(i){for(f in a)if(c.call(a[f],f,a[f])===!1)break}else for(;g<h;)if(c.call(a[g],g,a[g++])===!1)break;return a},trim:G?function(a){return a==null?"":G.call(a)}:function(a){return a==null?"":(a+"").replace(k,"").replace(l,"")},makeArray:function(a,b){var c=b||[];if(a!=null){var d=e.type(a);a.length==null||d==="string"||d==="function"||d==="regexp"||e.isWindow(a)?E.call(c,a):e.merge(c,a)}return c},inArray:function(a,b,c){var d;if(b){if(H)return H.call(b,a,c);d=b.length,c=c?c<0?Math.max(0,d+c):c:0;for(;c<d;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,c){var d=a.length,e=0;if(typeof c.length=="number")for(var f=c.length;e<f;e++)a[d++]=c[e];else while(c[e]!==b)a[d++]=c[e++];a.length=d;return a},grep:function(a,b,c){var d=[],e;c=!!c;for(var f=0,g=a.length;f<g;f++)e=!!b(a[f],f),c!==e&&d.push(a[f]);return d},map:function(a,c,d){var f,g,h=[],i=0,j=a.length,k=a instanceof e||j!==b&&typeof j=="number"&&(j>0&&a[0]&&a[j-1]||j===0||e.isArray(a));if(k)for(;i<j;i++)f=c(a[i],i,d),f!=null&&(h[h.length]=f);else for(g in a)f=c(a[g],g,d),f!=null&&(h[h.length]=f);return h.concat.apply([],h)},guid:1,proxy:function(a,c){if(typeof c=="string"){var d=a[c];c=a,a=d}if(!e.isFunction(a))return b;var f=F.call(arguments,2),g=function(){return a.apply(c,f.concat(F.call(arguments)))};g.guid=a.guid=a.guid||g.guid||e.guid++;return g},access:function(a,c,d,f,g,h){var i=a.length;if(typeof c=="object"){for(var j in c)e.access(a,j,c[j],f,g,d);return a}if(d!==b){f=!h&&f&&e.isFunction(d);for(var k=0;k<i;k++)g(a[k],c,f?d.call(a[k],k,g(a[k],c)):d,h);return a}return i?g(a[0],c):b},now:function(){return(new Date).getTime()},uaMatch:function(a){a=a.toLowerCase();var b=r.exec(a)||s.exec(a)||t.exec(a)||a.indexOf("compatible")<0&&u.exec(a)||[];return{browser:b[1]||"",version:b[2]||"0"}},sub:function(){function a(b,c){return new 
a.fn.init(b,c)}e.extend(!0,a,this),a.superclass=this,a.fn=a.prototype=this(),a.fn.constructor=a,a.sub=this.sub,a.fn.init=function(d,f){f&&f instanceof e&&!(f instanceof a)&&(f=a(f));return e.fn.init.call(this,d,f,b)},a.fn.init.prototype=a.fn;var b=a(c);return a},browser:{}}),e.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(a,b){I["[object "+b+"]"]=b.toLowerCase()}),z=e.uaMatch(y),z.browser&&(e.browser[z.browser]=!0,e.browser.version=z.version),e.browser.webkit&&(e.browser.safari=!0),j.test(" ")&&(k=/^[\s\xA0]+/,l=/[\s\xA0]+$/),h=e(c),c.addEventListener?B=function(){c.removeEventListener("DOMContentLoaded",B,!1),e.ready()}:c.attachEvent&&(B=function(){c.readyState==="complete"&&(c.detachEvent("onreadystatechange",B),e.ready())});return e}(),g={};f.Callbacks=function(a){a=a?g[a]||h(a):{};var c=[],d=[],e,i,j,k,l,m=function(b){var d,e,g,h,i;for(d=0,e=b.length;d<e;d++)g=b[d],h=f.type(g),h==="array"?m(g):h==="function"&&(!a.unique||!o.has(g))&&c.push(g)},n=function(b,f){f=f||[],e=!a.memory||[b,f],i=!0,l=j||0,j=0,k=c.length;for(;c&&l<k;l++)if(c[l].apply(b,f)===!1&&a.stopOnFalse){e=!0;break}i=!1,c&&(a.once?e===!0?o.disable():c=[]:d&&d.length&&(e=d.shift(),o.fireWith(e[0],e[1])))},o={add:function(){if(c){var a=c.length;m(arguments),i?k=c.length:e&&e!==!0&&(j=a,n(e[0],e[1]))}return this},remove:function(){if(c){var b=arguments,d=0,e=b.length;for(;d<e;d++)for(var f=0;f<c.length;f++)if(b[d]===c[f]){i&&f<=k&&(k--,f<=l&&l--),c.splice(f--,1);if(a.unique)break}}return this},has:function(a){if(c){var b=0,d=c.length;for(;b<d;b++)if(a===c[b])return!0}return!1},empty:function(){c=[];return this},disable:function(){c=d=e=b;return this},disabled:function(){return!c},lock:function(){d=b,(!e||e===!0)&&o.disable();return this},locked:function(){return!d},fireWith:function(b,c){d&&(i?a.once||d.push([b,c]):(!a.once||!e)&&n(b,c));return this},fire:function(){o.fireWith(this,arguments);return this},fired:function(){return!!e}};return o};var i=[].slice;f.extend({Deferred:function(a){var b=f.Callbacks("once memory"),c=f.Callbacks("once memory"),d=f.Callbacks("memory"),e="pending",g={resolve:b,reject:c,notify:d},h={done:b.add,fail:c.add,progress:d.add,state:function(){return e},isResolved:b.fired,isRejected:c.fired,then:function(a,b,c){i.done(a).fail(b).progress(c);return this},always:function(){i.done.apply(i,arguments).fail.apply(i,arguments);return this},pipe:function(a,b,c){return f.Deferred(function(d){f.each({done:[a,"resolve"],fail:[b,"reject"],progress:[c,"notify"]},function(a,b){var c=b[0],e=b[1],g;f.isFunction(c)?i[a](function(){g=c.apply(this,arguments),g&&f.isFunction(g.promise)?g.promise().then(d.resolve,d.reject,d.notify):d[e+"With"](this===i?d:this,[g])}):i[a](d[e])})}).promise()},promise:function(a){if(a==null)a=h;else for(var b in h)a[b]=h[b];return a}},i=h.promise({}),j;for(j in g)i[j]=g[j].fire,i[j+"With"]=g[j].fireWith;i.done(function(){e="resolved"},c.disable,d.lock).fail(function(){e="rejected"},b.disable,d.lock),a&&a.call(i,i);return i},when:function(a){function m(a){return function(b){e[a]=arguments.length>1?i.call(arguments,0):b,j.notifyWith(k,e)}}function l(a){return function(c){b[a]=arguments.length>1?i.call(arguments,0):c,--g||j.resolveWith(j,b)}}var b=i.call(arguments,0),c=0,d=b.length,e=Array(d),g=d,h=d,j=d<=1&&a&&f.isFunction(a.promise)?a:f.Deferred(),k=j.promise();if(d>1){for(;c<d;c++)b[c]&&b[c].promise&&f.isFunction(b[c].promise)?b[c].promise().then(l(c),j.reject,m(c)):--g;g||j.resolveWith(j,b)}else j!==a&&j.resolveWith(j,d?[a]:[]);return 
k}}),f.support=function(){var b,d,e,g,h,i,j,k,l,m,n,o,p,q=c.createElement("div"),r=c.documentElement;q.setAttribute("className","t"),q.innerHTML=" <link/><table></table><a href='/a' style='top:1px;float:left;opacity:.55;'>a</a><input type='checkbox'/>",d=q.getElementsByTagName("*"),e=q.getElementsByTagName("a")[0];if(!d||!d.length||!e)return{};g=c.createElement("select"),h=g.appendChild(c.createElement("option")),i=q.getElementsByTagName("input")[0],b={leadingWhitespace:q.firstChild.nodeType===3,tbody:!q.getElementsByTagName("tbody").length,htmlSerialize:!!q.getElementsByTagName("link").length,style:/top/.test(e.getAttribute("style")),hrefNormalized:e.getAttribute("href")==="/a",opacity:/^0.55/.test(e.style.opacity),cssFloat:!!e.style.cssFloat,checkOn:i.value==="on",optSelected:h.selected,getSetAttribute:q.className!=="t",enctype:!!c.createElement("form").enctype,html5Clone:c.createElement("nav").cloneNode(!0).outerHTML!=="<:nav></:nav>",submitBubbles:!0,changeBubbles:!0,focusinBubbles:!1,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0},i.checked=!0,b.noCloneChecked=i.cloneNode(!0).checked,g.disabled=!0,b.optDisabled=!h.disabled;try{delete q.test}catch(s){b.deleteExpando=!1}!q.addEventListener&&q.attachEvent&&q.fireEvent&&(q.attachEvent("onclick",function(){b.noCloneEvent=!1}),q.cloneNode(!0).fireEvent("onclick")),i=c.createElement("input"),i.value="t",i.setAttribute("type","radio"),b.radioValue=i.value==="t",i.setAttribute("checked","checked"),q.appendChild(i),k=c.createDocumentFragment(),k.appendChild(q.lastChild),b.checkClone=k.cloneNode(!0).cloneNode(!0).lastChild.checked,b.appendChecked=i.checked,k.removeChild(i),k.appendChild(q),q.innerHTML="",a.getComputedStyle&&(j=c.createElement("div"),j.style.width="0",j.style.marginRight="0",q.style.width="2px",q.appendChild(j),b.reliableMarginRight=(parseInt((a.getComputedStyle(j,null)||{marginRight:0}).marginRight,10)||0)===0);if(q.attachEvent)for(o in{submit:1,change:1,focusin:1})n="on"+o,p=n in q,p||(q.setAttribute(n,"return;"),p=typeof q[n]=="function"),b[o+"Bubbles"]=p;k.removeChild(q),k=g=h=j=q=i=null,f(function(){var a,d,e,g,h,i,j,k,m,n,o,r=c.getElementsByTagName("body")[0];!r||(j=1,k="position:absolute;top:0;left:0;width:1px;height:1px;margin:0;",m="visibility:hidden;border:0;",n="style='"+k+"border:5px solid #000;padding:0;'",o="<div "+n+"><div></div></div>"+"<table "+n+" cellpadding='0' cellspacing='0'>"+"<tr><td></td></tr></table>",a=c.createElement("div"),a.style.cssText=m+"width:0;height:0;position:static;top:0;margin-top:"+j+"px",r.insertBefore(a,r.firstChild),q=c.createElement("div"),a.appendChild(q),q.innerHTML="<table><tr><td style='padding:0;border:0;display:none'></td><td>t</td></tr></table>",l=q.getElementsByTagName("td"),p=l[0].offsetHeight===0,l[0].style.display="",l[1].style.display="none",b.reliableHiddenOffsets=p&&l[0].offsetHeight===0,q.innerHTML="",q.style.width=q.style.paddingLeft="1px",f.boxModel=b.boxModel=q.offsetWidth===2,typeof q.style.zoom!="undefined"&&(q.style.display="inline",q.style.zoom=1,b.inlineBlockNeedsLayout=q.offsetWidth===2,q.style.display="",q.innerHTML="<div 
style='width:4px;'></div>",b.shrinkWrapBlocks=q.offsetWidth!==2),q.style.cssText=k+m,q.innerHTML=o,d=q.firstChild,e=d.firstChild,h=d.nextSibling.firstChild.firstChild,i={doesNotAddBorder:e.offsetTop!==5,doesAddBorderForTableAndCells:h.offsetTop===5},e.style.position="fixed",e.style.top="20px",i.fixedPosition=e.offsetTop===20||e.offsetTop===15,e.style.position=e.style.top="",d.style.overflow="hidden",d.style.position="relative",i.subtractsBorderForOverflowNotVisible=e.offsetTop===-5,i.doesNotIncludeMarginInBodyOffset=r.offsetTop!==j,r.removeChild(a),q=a=null,f.extend(b,i))});return b}();var j=/^(?:\{.*\}|\[.*\])$/,k=/([A-Z])/g;f.extend({cache:{},uuid:0,expando:"jQuery"+(f.fn.jquery+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(a){a=a.nodeType?f.cache[a[f.expando]]:a[f.expando];return!!a&&!m(a)},data:function(a,c,d,e){if(!!f.acceptData(a)){var g,h,i,j=f.expando,k=typeof c=="string",l=a.nodeType,m=l?f.cache:a,n=l?a[j]:a[j]&&j,o=c==="events";if((!n||!m[n]||!o&&!e&&!m[n].data)&&k&&d===b)return;n||(l?a[j]=n=++f.uuid:n=j),m[n]||(m[n]={},l||(m[n].toJSON=f.noop));if(typeof c=="object"||typeof c=="function")e?m[n]=f.extend(m[n],c):m[n].data=f.extend(m[n].data,c);g=h=m[n],e||(h.data||(h.data={}),h=h.data),d!==b&&(h[f.camelCase(c)]=d);if(o&&!h[c])return g.events;k?(i=h[c],i==null&&(i=h[f.camelCase(c)])):i=h;return i}},removeData:function(a,b,c){if(!!f.acceptData(a)){var d,e,g,h=f.expando,i=a.nodeType,j=i?f.cache:a,k=i?a[h]:h;if(!j[k])return;if(b){d=c?j[k]:j[k].data;if(d){f.isArray(b)||(b in d?b=[b]:(b=f.camelCase(b),b in d?b=[b]:b=b.split(" ")));for(e=0,g=b.length;e<g;e++)delete d[b[e]];if(!(c?m:f.isEmptyObject)(d))return}}if(!c){delete j[k].data;if(!m(j[k]))return}f.support.deleteExpando||!j.setInterval?delete j[k]:j[k]=null,i&&(f.support.deleteExpando?delete a[h]:a.removeAttribute?a.removeAttribute(h):a[h]=null)}},_data:function(a,b,c){return f.data(a,b,c,!0)},acceptData:function(a){if(a.nodeName){var b=f.noData[a.nodeName.toLowerCase()];if(b)return b!==!0&&a.getAttribute("classid")===b}return!0}}),f.fn.extend({data:function(a,c){var d,e,g,h=null;if(typeof a=="undefined"){if(this.length){h=f.data(this[0]);if(this[0].nodeType===1&&!f._data(this[0],"parsedAttrs")){e=this[0].attributes;for(var i=0,j=e.length;i<j;i++)g=e[i].name,g.indexOf("data-")===0&&(g=f.camelCase(g.substring(5)),l(this[0],g,h[g]));f._data(this[0],"parsedAttrs",!0)}}return h}if(typeof a=="object")return this.each(function(){f.data(this,a)});d=a.split("."),d[1]=d[1]?"."+d[1]:"";if(c===b){h=this.triggerHandler("getData"+d[1]+"!",[d[0]]),h===b&&this.length&&(h=f.data(this[0],a),h=l(this[0],a,h));return h===b&&d[1]?this.data(d[0]):h}return this.each(function(){var b=f(this),e=[d[0],c];b.triggerHandler("setData"+d[1]+"!",e),f.data(this,a,c),b.triggerHandler("changeData"+d[1]+"!",e)})},removeData:function(a){return this.each(function(){f.removeData(this,a)})}}),f.extend({_mark:function(a,b){a&&(b=(b||"fx")+"mark",f._data(a,b,(f._data(a,b)||0)+1))},_unmark:function(a,b,c){a!==!0&&(c=b,b=a,a=!1);if(b){c=c||"fx";var d=c+"mark",e=a?0:(f._data(b,d)||1)-1;e?f._data(b,d,e):(f.removeData(b,d,!0),n(b,c,"mark"))}},queue:function(a,b,c){var d;if(a){b=(b||"fx")+"queue",d=f._data(a,b),c&&(!d||f.isArray(c)?d=f._data(a,b,f.makeArray(c)):d.push(c));return d||[]}},dequeue:function(a,b){b=b||"fx";var 
c=f.queue(a,b),d=c.shift(),e={};d==="inprogress"&&(d=c.shift()),d&&(b==="fx"&&c.unshift("inprogress"),f._data(a,b+".run",e),d.call(a,function(){f.dequeue(a,b)},e)),c.length||(f.removeData(a,b+"queue "+b+".run",!0),n(a,b,"queue"))}}),f.fn.extend({queue:function(a,c){typeof a!="string"&&(c=a,a="fx");if(c===b)return f.queue(this[0],a);return this.each(function(){var b=f.queue(this,a,c);a==="fx"&&b[0]!=="inprogress"&&f.dequeue(this,a)})},dequeue:function(a){return this.each(function(){f.dequeue(this,a)})},delay:function(a,b){a=f.fx?f.fx.speeds[a]||a:a,b=b||"fx";return this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,c){function m(){--h||d.resolveWith(e,[e])}typeof a!="string"&&(c=a,a=b),a=a||"fx";var d=f.Deferred(),e=this,g=e.length,h=1,i=a+"defer",j=a+"queue",k=a+"mark",l;while(g--)if(l=f.data(e[g],i,b,!0)||(f.data(e[g],j,b,!0)||f.data(e[g],k,b,!0))&&f.data(e[g],i,f.Callbacks("once memory"),!0))h++,l.add(m);m();return d.promise()}});var o=/[\n\t\r]/g,p=/\s+/,q=/\r/g,r=/^(?:button|input)$/i,s=/^(?:button|input|object|select|textarea)$/i,t=/^a(?:rea)?$/i,u=/^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,v=f.support.getSetAttribute,w,x,y;f.fn.extend({attr:function(a,b){return f.access(this,a,b,!0,f.attr)},removeAttr:function(a){return this.each(function(){f.removeAttr(this,a)})},prop:function(a,b){return f.access(this,a,b,!0,f.prop)},removeProp:function(a){a=f.propFix[a]||a;return this.each(function(){try{this[a]=b,delete this[a]}catch(c){}})},addClass:function(a){var b,c,d,e,g,h,i;if(f.isFunction(a))return this.each(function(b){f(this).addClass(a.call(this,b,this.className))});if(a&&typeof a=="string"){b=a.split(p);for(c=0,d=this.length;c<d;c++){e=this[c];if(e.nodeType===1)if(!e.className&&b.length===1)e.className=a;else{g=" "+e.className+" ";for(h=0,i=b.length;h<i;h++)~g.indexOf(" "+b[h]+" ")||(g+=b[h]+" ");e.className=f.trim(g)}}}return this},removeClass:function(a){var c,d,e,g,h,i,j;if(f.isFunction(a))return this.each(function(b){f(this).removeClass(a.call(this,b,this.className))});if(a&&typeof a=="string"||a===b){c=(a||"").split(p);for(d=0,e=this.length;d<e;d++){g=this[d];if(g.nodeType===1&&g.className)if(a){h=(" "+g.className+" ").replace(o," ");for(i=0,j=c.length;i<j;i++)h=h.replace(" "+c[i]+" "," ");g.className=f.trim(h)}else g.className=""}}return this},toggleClass:function(a,b){var c=typeof a,d=typeof b=="boolean";if(f.isFunction(a))return this.each(function(c){f(this).toggleClass(a.call(this,c,this.className,b),b)});return this.each(function(){if(c==="string"){var e,g=0,h=f(this),i=b,j=a.split(p);while(e=j[g++])i=d?i:!h.hasClass(e),h[i?"addClass":"removeClass"](e)}else if(c==="undefined"||c==="boolean")this.className&&f._data(this,"__className__",this.className),this.className=this.className||a===!1?"":f._data(this,"__className__")||""})},hasClass:function(a){var b=" "+a+" ",c=0,d=this.length;for(;c<d;c++)if(this[c].nodeType===1&&(" "+this[c].className+" ").replace(o," ").indexOf(b)>-1)return!0;return!1},val:function(a){var c,d,e,g=this[0];{if(!!arguments.length){e=f.isFunction(a);return this.each(function(d){var g=f(this),h;if(this.nodeType===1){e?h=a.call(this,d,g.val()):h=a,h==null?h="":typeof h=="number"?h+="":f.isArray(h)&&(h=f.map(h,function(a){return a==null?"":a+""})),c=f.valHooks[this.nodeName.toLowerCase()]||f.valHooks[this.type];if(!c||!("set"in 
c)||c.set(this,h,"value")===b)this.value=h}})}if(g){c=f.valHooks[g.nodeName.toLowerCase()]||f.valHooks[g.type];if(c&&"get"in c&&(d=c.get(g,"value"))!==b)return d;d=g.value;return typeof d=="string"?d.replace(q,""):d==null?"":d}}}}),f.extend({valHooks:{option:{get:function(a){var b=a.attributes.value;return!b||b.specified?a.value:a.text}},select:{get:function(a){var b,c,d,e,g=a.selectedIndex,h=[],i=a.options,j=a.type==="select-one";if(g<0)return null;c=j?g:0,d=j?g+1:i.length;for(;c<d;c++){e=i[c];if(e.selected&&(f.support.optDisabled?!e.disabled:e.getAttribute("disabled")===null)&&(!e.parentNode.disabled||!f.nodeName(e.parentNode,"optgroup"))){b=f(e).val();if(j)return b;h.push(b)}}if(j&&!h.length&&i.length)return f(i[g]).val();return h},set:function(a,b){var c=f.makeArray(b);f(a).find("option").each(function(){this.selected=f.inArray(f(this).val(),c)>=0}),c.length||(a.selectedIndex=-1);return c}}},attrFn:{val:!0,css:!0,html:!0,text:!0,data:!0,width:!0,height:!0,offset:!0},attr:function(a,c,d,e){var g,h,i,j=a.nodeType;if(!!a&&j!==3&&j!==8&&j!==2){if(e&&c in f.attrFn)return f(a)[c](d);if(typeof a.getAttribute=="undefined")return f.prop(a,c,d);i=j!==1||!f.isXMLDoc(a),i&&(c=c.toLowerCase(),h=f.attrHooks[c]||(u.test(c)?x:w));if(d!==b){if(d===null){f.removeAttr(a,c);return}if(h&&"set"in h&&i&&(g=h.set(a,d,c))!==b)return g;a.setAttribute(c,""+d);return d}if(h&&"get"in h&&i&&(g=h.get(a,c))!==null)return g;g=a.getAttribute(c);return g===null?b:g}},removeAttr:function(a,b){var c,d,e,g,h=0;if(b&&a.nodeType===1){d=b.toLowerCase().split(p),g=d.length;for(;h<g;h++)e=d[h],e&&(c=f.propFix[e]||e,f.attr(a,e,""),a.removeAttribute(v?e:c),u.test(e)&&c in a&&(a[c]=!1))}},attrHooks:{type:{set:function(a,b){if(r.test(a.nodeName)&&a.parentNode)f.error("type property can't be changed");else if(!f.support.radioValue&&b==="radio"&&f.nodeName(a,"input")){var c=a.value;a.setAttribute("type",b),c&&(a.value=c);return b}}},value:{get:function(a,b){if(w&&f.nodeName(a,"button"))return w.get(a,b);return b in a?a.value:null},set:function(a,b,c){if(w&&f.nodeName(a,"button"))return w.set(a,b,c);a.value=b}}},propFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(a,c,d){var e,g,h,i=a.nodeType;if(!!a&&i!==3&&i!==8&&i!==2){h=i!==1||!f.isXMLDoc(a),h&&(c=f.propFix[c]||c,g=f.propHooks[c]);return d!==b?g&&"set"in g&&(e=g.set(a,d,c))!==b?e:a[c]=d:g&&"get"in g&&(e=g.get(a,c))!==null?e:a[c]}},propHooks:{tabIndex:{get:function(a){var c=a.getAttributeNode("tabindex");return c&&c.specified?parseInt(c.value,10):s.test(a.nodeName)||t.test(a.nodeName)&&a.href?0:b}}}}),f.attrHooks.tabindex=f.propHooks.tabIndex,x={get:function(a,c){var d,e=f.prop(a,c);return e===!0||typeof e!="boolean"&&(d=a.getAttributeNode(c))&&d.nodeValue!==!1?c.toLowerCase():b},set:function(a,b,c){var d;b===!1?f.removeAttr(a,c):(d=f.propFix[c]||c,d in a&&(a[d]=!0),a.setAttribute(c,c.toLowerCase()));return c}},v||(y={name:!0,id:!0},w=f.valHooks.button={get:function(a,c){var d;d=a.getAttributeNode(c);return d&&(y[c]?d.nodeValue!=="":d.specified)?d.nodeValue:b},set:function(a,b,d){var e=a.getAttributeNode(d);e||(e=c.createAttribute(d),a.setAttributeNode(e));return e.nodeValue=b+""}},f.attrHooks.tabindex.set=w.set,f.each(["width","height"],function(a,b){f.attrHooks[b]=f.extend(f.attrHooks[b],{set:function(a,c){if(c===""){a.setAttribute(b,"auto");return 
c}}})}),f.attrHooks.contenteditable={get:w.get,set:function(a,b,c){b===""&&(b="false"),w.set(a,b,c)}}),f.support.hrefNormalized||f.each(["href","src","width","height"],function(a,c){f.attrHooks[c]=f.extend(f.attrHooks[c],{get:function(a){var d=a.getAttribute(c,2);return d===null?b:d}})}),f.support.style||(f.attrHooks.style={get:function(a){return a.style.cssText.toLowerCase()||b},set:function(a,b){return a.style.cssText=""+b}}),f.support.optSelected||(f.propHooks.selected=f.extend(f.propHooks.selected,{get:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex);return null}})),f.support.enctype||(f.propFix.enctype="encoding"),f.support.checkOn||f.each(["radio","checkbox"],function(){f.valHooks[this]={get:function(a){return a.getAttribute("value")===null?"on":a.value}}}),f.each(["radio","checkbox"],function(){f.valHooks[this]=f.extend(f.valHooks[this],{set:function(a,b){if(f.isArray(b))return a.checked=f.inArray(f(a).val(),b)>=0}})});var z=/^(?:textarea|input|select)$/i,A=/^([^\.]*)?(?:\.(.+))?$/,B=/\bhover(\.\S+)?\b/,C=/^key/,D=/^(?:mouse|contextmenu)|click/,E=/^(?:focusinfocus|focusoutblur)$/,F=/^(\w*)(?:#([\w\-]+))?(?:\.([\w\-]+))?$/,G=function(a){var b=F.exec(a);b&&(b[1]=(b[1]||"").toLowerCase(),b[3]=b[3]&&new RegExp("(?:^|\\s)"+b[3]+"(?:\\s|$)"));return b},H=function(a,b){var c=a.attributes||{};return(!b[1]||a.nodeName.toLowerCase()===b[1])&&(!b[2]||(c.id||{}).value===b[2])&&(!b[3]||b[3].test((c["class"]||{}).value))},I=function(a){return f.event.special.hover?a:a.replace(B,"mouseenter$1 mouseleave$1")};
f.event={add:function(a,c,d,e,g){var h,i,j,k,l,m,n,o,p,q,r,s;if(!(a.nodeType===3||a.nodeType===8||!c||!d||!(h=f._data(a)))){d.handler&&(p=d,d=p.handler),d.guid||(d.guid=f.guid++),j=h.events,j||(h.events=j={}),i=h.handle,i||(h.handle=i=function(a){return typeof f!="undefined"&&(!a||f.event.triggered!==a.type)?f.event.dispatch.apply(i.elem,arguments):b},i.elem=a),c=f.trim(I(c)).split(" ");for(k=0;k<c.length;k++){l=A.exec(c[k])||[],m=l[1],n=(l[2]||"").split(".").sort(),s=f.event.special[m]||{},m=(g?s.delegateType:s.bindType)||m,s=f.event.special[m]||{},o=f.extend({type:m,origType:l[1],data:e,handler:d,guid:d.guid,selector:g,quick:G(g),namespace:n.join(".")},p),r=j[m];if(!r){r=j[m]=[],r.delegateCount=0;if(!s.setup||s.setup.call(a,e,n,i)===!1)a.addEventListener?a.addEventListener(m,i,!1):a.attachEvent&&a.attachEvent("on"+m,i)}s.add&&(s.add.call(a,o),o.handler.guid||(o.handler.guid=d.guid)),g?r.splice(r.delegateCount++,0,o):r.push(o),f.event.global[m]=!0}a=null}},global:{},remove:function(a,b,c,d,e){var g=f.hasData(a)&&f._data(a),h,i,j,k,l,m,n,o,p,q,r,s;if(!!g&&!!(o=g.events)){b=f.trim(I(b||"")).split(" ");for(h=0;h<b.length;h++){i=A.exec(b[h])||[],j=k=i[1],l=i[2];if(!j){for(j in o)f.event.remove(a,j+b[h],c,d,!0);continue}p=f.event.special[j]||{},j=(d?p.delegateType:p.bindType)||j,r=o[j]||[],m=r.length,l=l?new RegExp("(^|\\.)"+l.split(".").sort().join("\\.(?:.*\\.)?")+"(\\.|$)"):null;for(n=0;n<r.length;n++)s=r[n],(e||k===s.origType)&&(!c||c.guid===s.guid)&&(!l||l.test(s.namespace))&&(!d||d===s.selector||d==="**"&&s.selector)&&(r.splice(n--,1),s.selector&&r.delegateCount--,p.remove&&p.remove.call(a,s));r.length===0&&m!==r.length&&((!p.teardown||p.teardown.call(a,l)===!1)&&f.removeEvent(a,j,g.handle),delete o[j])}f.isEmptyObject(o)&&(q=g.handle,q&&(q.elem=null),f.removeData(a,["events","handle"],!0))}},customEvent:{getData:!0,setData:!0,changeData:!0},trigger:function(c,d,e,g){if(!e||e.nodeType!==3&&e.nodeType!==8){var h=c.type||c,i=[],j,k,l,m,n,o,p,q,r,s;if(E.test(h+f.event.triggered))return;h.indexOf("!")>=0&&(h=h.slice(0,-1),k=!0),h.indexOf(".")>=0&&(i=h.split("."),h=i.shift(),i.sort());if((!e||f.event.customEvent[h])&&!f.event.global[h])return;c=typeof c=="object"?c[f.expando]?c:new f.Event(h,c):new f.Event(h),c.type=h,c.isTrigger=!0,c.exclusive=k,c.namespace=i.join("."),c.namespace_re=c.namespace?new RegExp("(^|\\.)"+i.join("\\.(?:.*\\.)?")+"(\\.|$)"):null,o=h.indexOf(":")<0?"on"+h:"";if(!e){j=f.cache;for(l in j)j[l].events&&j[l].events[h]&&f.event.trigger(c,d,j[l].handle.elem,!0);return}c.result=b,c.target||(c.target=e),d=d!=null?f.makeArray(d):[],d.unshift(c),p=f.event.special[h]||{};if(p.trigger&&p.trigger.apply(e,d)===!1)return;r=[[e,p.bindType||h]];if(!g&&!p.noBubble&&!f.isWindow(e)){s=p.delegateType||h,m=E.test(s+h)?e:e.parentNode,n=null;for(;m;m=m.parentNode)r.push([m,s]),n=m;n&&n===e.ownerDocument&&r.push([n.defaultView||n.parentWindow||a,s])}for(l=0;l<r.length&&!c.isPropagationStopped();l++)m=r[l][0],c.type=r[l][1],q=(f._data(m,"events")||{})[c.type]&&f._data(m,"handle"),q&&q.apply(m,d),q=o&&m[o],q&&f.acceptData(m)&&q.apply(m,d)===!1&&c.preventDefault();c.type=h,!g&&!c.isDefaultPrevented()&&(!p._default||p._default.apply(e.ownerDocument,d)===!1)&&(h!=="click"||!f.nodeName(e,"a"))&&f.acceptData(e)&&o&&e[h]&&(h!=="focus"&&h!=="blur"||c.target.offsetWidth!==0)&&!f.isWindow(e)&&(n=e[o],n&&(e[o]=null),f.event.triggered=h,e[h](),f.event.triggered=b,n&&(e[o]=n));return c.result}},dispatch:function(c){c=f.event.fix(c||a.event);var 
d=(f._data(this,"events")||{})[c.type]||[],e=d.delegateCount,g=[].slice.call(arguments,0),h=!c.exclusive&&!c.namespace,i=[],j,k,l,m,n,o,p,q,r,s,t;g[0]=c,c.delegateTarget=this;if(e&&!c.target.disabled&&(!c.button||c.type!=="click")){m=f(this),m.context=this.ownerDocument||this;for(l=c.target;l!=this;l=l.parentNode||this){o={},q=[],m[0]=l;for(j=0;j<e;j++)r=d[j],s=r.selector,o[s]===b&&(o[s]=r.quick?H(l,r.quick):m.is(s)),o[s]&&q.push(r);q.length&&i.push({elem:l,matches:q})}}d.length>e&&i.push({elem:this,matches:d.slice(e)});for(j=0;j<i.length&&!c.isPropagationStopped();j++){p=i[j],c.currentTarget=p.elem;for(k=0;k<p.matches.length&&!c.isImmediatePropagationStopped();k++){r=p.matches[k];if(h||!c.namespace&&!r.namespace||c.namespace_re&&c.namespace_re.test(r.namespace))c.data=r.data,c.handleObj=r,n=((f.event.special[r.origType]||{}).handle||r.handler).apply(p.elem,g),n!==b&&(c.result=n,n===!1&&(c.preventDefault(),c.stopPropagation()))}}return c.result},props:"attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){a.which==null&&(a.which=b.charCode!=null?b.charCode:b.keyCode);return a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,d){var e,f,g,h=d.button,i=d.fromElement;a.pageX==null&&d.clientX!=null&&(e=a.target.ownerDocument||c,f=e.documentElement,g=e.body,a.pageX=d.clientX+(f&&f.scrollLeft||g&&g.scrollLeft||0)-(f&&f.clientLeft||g&&g.clientLeft||0),a.pageY=d.clientY+(f&&f.scrollTop||g&&g.scrollTop||0)-(f&&f.clientTop||g&&g.clientTop||0)),!a.relatedTarget&&i&&(a.relatedTarget=i===a.target?d.toElement:i),!a.which&&h!==b&&(a.which=h&1?1:h&2?3:h&4?2:0);return a}},fix:function(a){if(a[f.expando])return a;var d,e,g=a,h=f.event.fixHooks[a.type]||{},i=h.props?this.props.concat(h.props):this.props;a=f.Event(g);for(d=i.length;d;)e=i[--d],a[e]=g[e];a.target||(a.target=g.srcElement||c),a.target.nodeType===3&&(a.target=a.target.parentNode),a.metaKey===b&&(a.metaKey=a.ctrlKey);return h.filter?h.filter(a,g):a},special:{ready:{setup:f.bindReady},load:{noBubble:!0},focus:{delegateType:"focusin"},blur:{delegateType:"focusout"},beforeunload:{setup:function(a,b,c){f.isWindow(this)&&(this.onbeforeunload=c)},teardown:function(a,b){this.onbeforeunload===b&&(this.onbeforeunload=null)}}},simulate:function(a,b,c,d){var e=f.extend(new f.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?f.event.trigger(e,null,b):f.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},f.event.handle=f.event.dispatch,f.removeEvent=c.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){a.detachEvent&&a.detachEvent("on"+b,c)},f.Event=function(a,b){if(!(this instanceof f.Event))return new f.Event(a,b);a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||a.returnValue===!1||a.getPreventDefault&&a.getPreventDefault()?K:J):this.type=a,b&&f.extend(this,b),this.timeStamp=a&&a.timeStamp||f.now(),this[f.expando]=!0},f.Event.prototype={preventDefault:function(){this.isDefaultPrevented=K;var a=this.originalEvent;!a||(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){this.isPropagationStopped=K;var 
a=this.originalEvent;!a||(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=K,this.stopPropagation()},isDefaultPrevented:J,isPropagationStopped:J,isImmediatePropagationStopped:J},f.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){f.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c=this,d=a.relatedTarget,e=a.handleObj,g=e.selector,h;if(!d||d!==c&&!f.contains(c,d))a.type=e.origType,h=e.handler.apply(this,arguments),a.type=b;return h}}}),f.support.submitBubbles||(f.event.special.submit={setup:function(){if(f.nodeName(this,"form"))return!1;f.event.add(this,"click._submit keypress._submit",function(a){var c=a.target,d=f.nodeName(c,"input")||f.nodeName(c,"button")?c.form:b;d&&!d._submit_attached&&(f.event.add(d,"submit._submit",function(a){this.parentNode&&!a.isTrigger&&f.event.simulate("submit",this.parentNode,a,!0)}),d._submit_attached=!0)})},teardown:function(){if(f.nodeName(this,"form"))return!1;f.event.remove(this,"._submit")}}),f.support.changeBubbles||(f.event.special.change={setup:function(){if(z.test(this.nodeName)){if(this.type==="checkbox"||this.type==="radio")f.event.add(this,"propertychange._change",function(a){a.originalEvent.propertyName==="checked"&&(this._just_changed=!0)}),f.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1,f.event.simulate("change",this,a,!0))});return!1}f.event.add(this,"beforeactivate._change",function(a){var b=a.target;z.test(b.nodeName)&&!b._change_attached&&(f.event.add(b,"change._change",function(a){this.parentNode&&!a.isSimulated&&!a.isTrigger&&f.event.simulate("change",this.parentNode,a,!0)}),b._change_attached=!0)})},handle:function(a){var b=a.target;if(this!==b||a.isSimulated||a.isTrigger||b.type!=="radio"&&b.type!=="checkbox")return a.handleObj.handler.apply(this,arguments)},teardown:function(){f.event.remove(this,"._change");return z.test(this.nodeName)}}),f.support.focusinBubbles||f.each({focus:"focusin",blur:"focusout"},function(a,b){var d=0,e=function(a){f.event.simulate(b,a.target,f.event.fix(a),!0)};f.event.special[b]={setup:function(){d++===0&&c.addEventListener(a,e,!0)},teardown:function(){--d===0&&c.removeEventListener(a,e,!0)}}}),f.fn.extend({on:function(a,c,d,e,g){var h,i;if(typeof a=="object"){typeof c!="string"&&(d=c,c=b);for(i in a)this.on(i,c,d,a[i],g);return this}d==null&&e==null?(e=c,d=c=b):e==null&&(typeof c=="string"?(e=d,d=b):(e=d,d=c,c=b));if(e===!1)e=J;else if(!e)return this;g===1&&(h=e,e=function(a){f().off(a);return h.apply(this,arguments)},e.guid=h.guid||(h.guid=f.guid++));return this.each(function(){f.event.add(this,a,e,d,c)})},one:function(a,b,c,d){return this.on.call(this,a,b,c,d,1)},off:function(a,c,d){if(a&&a.preventDefault&&a.handleObj){var e=a.handleObj;f(a.delegateTarget).off(e.namespace?e.type+"."+e.namespace:e.type,e.selector,e.handler);return this}if(typeof a=="object"){for(var g in a)this.off(g,c,a[g]);return this}if(c===!1||typeof c=="function")d=c,c=b;d===!1&&(d=J);return this.each(function(){f.event.remove(this,a,d,c)})},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},live:function(a,b,c){f(this.context).on(a,this.selector,b,c);return this},die:function(a,b){f(this.context).off(a,this.selector||"**",b);return this},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return arguments.length==1?this.off(a,"**"):this.off(b,a,c)},trigger:function(a,b){return 
this.each(function(){f.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0])return f.event.trigger(a,b,this[0],!0)},toggle:function(a){var b=arguments,c=a.guid||f.guid++,d=0,e=function(c){var e=(f._data(this,"lastToggle"+a.guid)||0)%d;f._data(this,"lastToggle"+a.guid,e+1),c.preventDefault();return b[e].apply(this,arguments)||!1};e.guid=c;while(d<b.length)b[d++].guid=c;return this.click(e)},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),f.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){f.fn[b]=function(a,c){c==null&&(c=a,a=null);return arguments.length>0?this.on(b,null,a,c):this.trigger(b)},f.attrFn&&(f.attrFn[b]=!0),C.test(b)&&(f.event.fixHooks[b]=f.event.keyHooks),D.test(b)&&(f.event.fixHooks[b]=f.event.mouseHooks)}),function(){function x(a,b,c,e,f,g){for(var h=0,i=e.length;h<i;h++){var j=e[h];if(j){var k=!1;j=j[a];while(j){if(j[d]===c){k=e[j.sizset];break}if(j.nodeType===1){g||(j[d]=c,j.sizset=h);if(typeof b!="string"){if(j===b){k=!0;break}}else if(m.filter(b,[j]).length>0){k=j;break}}j=j[a]}e[h]=k}}}function w(a,b,c,e,f,g){for(var h=0,i=e.length;h<i;h++){var j=e[h];if(j){var k=!1;j=j[a];while(j){if(j[d]===c){k=e[j.sizset];break}j.nodeType===1&&!g&&(j[d]=c,j.sizset=h);if(j.nodeName.toLowerCase()===b){k=j;break}j=j[a]}e[h]=k}}}var a=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,d="sizcache"+(Math.random()+"").replace(".",""),e=0,g=Object.prototype.toString,h=!1,i=!0,j=/\\/g,k=/\r\n/g,l=/\W/;[0,0].sort(function(){i=!1;return 0});var m=function(b,d,e,f){e=e||[],d=d||c;var h=d;if(d.nodeType!==1&&d.nodeType!==9)return[];if(!b||typeof b!="string")return e;var i,j,k,l,n,q,r,t,u=!0,v=m.isXML(d),w=[],x=b;do{a.exec(""),i=a.exec(x);if(i){x=i[3],w.push(i[1]);if(i[2]){l=i[3];break}}}while(i);if(w.length>1&&p.exec(b))if(w.length===2&&o.relative[w[0]])j=y(w[0]+w[1],d,f);else{j=o.relative[w[0]]?[d]:m(w.shift(),d);while(w.length)b=w.shift(),o.relative[b]&&(b+=w.shift()),j=y(b,j,f)}else{!f&&w.length>1&&d.nodeType===9&&!v&&o.match.ID.test(w[0])&&!o.match.ID.test(w[w.length-1])&&(n=m.find(w.shift(),d,v),d=n.expr?m.filter(n.expr,n.set)[0]:n.set[0]);if(d){n=f?{expr:w.pop(),set:s(f)}:m.find(w.pop(),w.length===1&&(w[0]==="~"||w[0]==="+")&&d.parentNode?d.parentNode:d,v),j=n.expr?m.filter(n.expr,n.set):n.set,w.length>0?k=s(j):u=!1;while(w.length)q=w.pop(),r=q,o.relative[q]?r=w.pop():q="",r==null&&(r=d),o.relative[q](k,r,v)}else k=w=[]}k||(k=j),k||m.error(q||b);if(g.call(k)==="[object Array]")if(!u)e.push.apply(e,k);else if(d&&d.nodeType===1)for(t=0;k[t]!=null;t++)k[t]&&(k[t]===!0||k[t].nodeType===1&&m.contains(d,k[t]))&&e.push(j[t]);else for(t=0;k[t]!=null;t++)k[t]&&k[t].nodeType===1&&e.push(j[t]);else s(k,e);l&&(m(l,h,e,f),m.uniqueSort(e));return e};m.uniqueSort=function(a){if(u){h=i,a.sort(u);if(h)for(var b=1;b<a.length;b++)a[b]===a[b-1]&&a.splice(b--,1)}return a},m.matches=function(a,b){return m(a,null,null,b)},m.matchesSelector=function(a,b){return m(b,null,null,[a]).length>0},m.find=function(a,b,c){var d,e,f,g,h,i;if(!a)return[];for(e=0,f=o.order.length;e<f;e++){h=o.order[e];if(g=o.leftMatch[h].exec(a)){i=g[1],g.splice(1,1);if(i.substr(i.length-1)!=="\\"){g[1]=(g[1]||"").replace(j,""),d=o.find[h](g,b,c);if(d!=null){a=a.replace(o.match[h],"");break}}}}d||(d=typeof 
b.getElementsByTagName!="undefined"?b.getElementsByTagName("*"):[]);return{set:d,expr:a}},m.filter=function(a,c,d,e){var f,g,h,i,j,k,l,n,p,q=a,r=[],s=c,t=c&&c[0]&&m.isXML(c[0]);while(a&&c.length){for(h in o.filter)if((f=o.leftMatch[h].exec(a))!=null&&f[2]){k=o.filter[h],l=f[1],g=!1,f.splice(1,1);if(l.substr(l.length-1)==="\\")continue;s===r&&(r=[]);if(o.preFilter[h]){f=o.preFilter[h](f,s,d,r,e,t);if(!f)g=i=!0;else if(f===!0)continue}if(f)for(n=0;(j=s[n])!=null;n++)j&&(i=k(j,f,n,s),p=e^i,d&&i!=null?p?g=!0:s[n]=!1:p&&(r.push(j),g=!0));if(i!==b){d||(s=r),a=a.replace(o.match[h],"");if(!g)return[];break}}if(a===q)if(g==null)m.error(a);else break;q=a}return s},m.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)};var n=m.getText=function(a){var b,c,d=a.nodeType,e="";if(d){if(d===1||d===9){if(typeof a.textContent=="string")return a.textContent;if(typeof a.innerText=="string")return a.innerText.replace(k,"");for(a=a.firstChild;a;a=a.nextSibling)e+=n(a)}else if(d===3||d===4)return a.nodeValue}else for(b=0;c=a[b];b++)c.nodeType!==8&&(e+=n(c));return e},o=m.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(a){return a.getAttribute("href")},type:function(a){return a.getAttribute("type")}},relative:{"+":function(a,b){var c=typeof b=="string",d=c&&!l.test(b),e=c&&!d;d&&(b=b.toLowerCase());for(var f=0,g=a.length,h;f<g;f++)if(h=a[f]){while((h=h.previousSibling)&&h.nodeType!==1);a[f]=e||h&&h.nodeName.toLowerCase()===b?h||!1:h===b}e&&m.filter(b,a,!0)},">":function(a,b){var c,d=typeof b=="string",e=0,f=a.length;if(d&&!l.test(b)){b=b.toLowerCase();for(;e<f;e++){c=a[e];if(c){var g=c.parentNode;a[e]=g.nodeName.toLowerCase()===b?g:!1}}}else{for(;e<f;e++)c=a[e],c&&(a[e]=d?c.parentNode:c.parentNode===b);d&&m.filter(b,a,!0)}},"":function(a,b,c){var d,f=e++,g=x;typeof b=="string"&&!l.test(b)&&(b=b.toLowerCase(),d=b,g=w),g("parentNode",b,f,a,d,c)},"~":function(a,b,c){var d,f=e++,g=x;typeof b=="string"&&!l.test(b)&&(b=b.toLowerCase(),d=b,g=w),g("previousSibling",b,f,a,d,c)}},find:{ID:function(a,b,c){if(typeof b.getElementById!="undefined"&&!c){var d=b.getElementById(a[1]);return d&&d.parentNode?[d]:[]}},NAME:function(a,b){if(typeof b.getElementsByName!="undefined"){var c=[],d=b.getElementsByName(a[1]);for(var e=0,f=d.length;e<f;e++)d[e].getAttribute("name")===a[1]&&c.push(d[e]);return c.length===0?null:c}},TAG:function(a,b){if(typeof b.getElementsByTagName!="undefined")return b.getElementsByTagName(a[1])}},preFilter:{CLASS:function(a,b,c,d,e,f){a=" "+a[1].replace(j,"")+" ";if(f)return a;for(var g=0,h;(h=b[g])!=null;g++)h&&(e^(h.className&&(" "+h.className+" ").replace(/[\t\n\r]/g," ").indexOf(a)>=0)?c||d.push(h):c&&(b[g]=!1));return!1},ID:function(a){return a[1].replace(j,"")},TAG:function(a,b){return a[1].replace(j,"").toLowerCase()},CHILD:function(a){if(a[1]==="nth"){a[2]||m.error(a[0]),a[2]=a[2].replace(/^\+|\s*/g,"");var 
b=/(-?)(\d*)(?:n([+\-]?\d*))?/.exec(a[2]==="even"&&"2n"||a[2]==="odd"&&"2n+1"||!/\D/.test(a[2])&&"0n+"+a[2]||a[2]);a[2]=b[1]+(b[2]||1)-0,a[3]=b[3]-0}else a[2]&&m.error(a[0]);a[0]=e++;return a},ATTR:function(a,b,c,d,e,f){var g=a[1]=a[1].replace(j,"");!f&&o.attrMap[g]&&(a[1]=o.attrMap[g]),a[4]=(a[4]||a[5]||"").replace(j,""),a[2]==="~="&&(a[4]=" "+a[4]+" ");return a},PSEUDO:function(b,c,d,e,f){if(b[1]==="not")if((a.exec(b[3])||"").length>1||/^\w/.test(b[3]))b[3]=m(b[3],null,null,c);else{var g=m.filter(b[3],c,d,!0^f);d||e.push.apply(e,g);return!1}else if(o.match.POS.test(b[0])||o.match.CHILD.test(b[0]))return!0;return b},POS:function(a){a.unshift(!0);return a}},filters:{enabled:function(a){return a.disabled===!1&&a.type!=="hidden"},disabled:function(a){return a.disabled===!0},checked:function(a){return a.checked===!0},selected:function(a){a.parentNode&&a.parentNode.selectedIndex;return a.selected===!0},parent:function(a){return!!a.firstChild},empty:function(a){return!a.firstChild},has:function(a,b,c){return!!m(c[3],a).length},header:function(a){return/h\d/i.test(a.nodeName)},text:function(a){var b=a.getAttribute("type"),c=a.type;return a.nodeName.toLowerCase()==="input"&&"text"===c&&(b===c||b===null)},radio:function(a){return a.nodeName.toLowerCase()==="input"&&"radio"===a.type},checkbox:function(a){return a.nodeName.toLowerCase()==="input"&&"checkbox"===a.type},file:function(a){return a.nodeName.toLowerCase()==="input"&&"file"===a.type},password:function(a){return a.nodeName.toLowerCase()==="input"&&"password"===a.type},submit:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"submit"===a.type},image:function(a){return a.nodeName.toLowerCase()==="input"&&"image"===a.type},reset:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"reset"===a.type},button:function(a){var b=a.nodeName.toLowerCase();return b==="input"&&"button"===a.type||b==="button"},input:function(a){return/input|select|textarea|button/i.test(a.nodeName)},focus:function(a){return a===a.ownerDocument.activeElement}},setFilters:{first:function(a,b){return b===0},last:function(a,b,c,d){return b===d.length-1},even:function(a,b){return b%2===0},odd:function(a,b){return b%2===1},lt:function(a,b,c){return b<c[3]-0},gt:function(a,b,c){return b>c[3]-0},nth:function(a,b,c){return c[3]-0===b},eq:function(a,b,c){return c[3]-0===b}},filter:{PSEUDO:function(a,b,c,d){var e=b[1],f=o.filters[e];if(f)return f(a,c,b,d);if(e==="contains")return(a.textContent||a.innerText||n([a])||"").indexOf(b[3])>=0;if(e==="not"){var g=b[3];for(var h=0,i=g.length;h<i;h++)if(g[h]===a)return!1;return!0}m.error(e)},CHILD:function(a,b){var c,e,f,g,h,i,j,k=b[1],l=a;switch(k){case"only":case"first":while(l=l.previousSibling)if(l.nodeType===1)return!1;if(k==="first")return!0;l=a;case"last":while(l=l.nextSibling)if(l.nodeType===1)return!1;return!0;case"nth":c=b[2],e=b[3];if(c===1&&e===0)return!0;f=b[0],g=a.parentNode;if(g&&(g[d]!==f||!a.nodeIndex)){i=0;for(l=g.firstChild;l;l=l.nextSibling)l.nodeType===1&&(l.nodeIndex=++i);g[d]=f}j=a.nodeIndex-e;return c===0?j===0:j%c===0&&j/c>=0}},ID:function(a,b){return a.nodeType===1&&a.getAttribute("id")===b},TAG:function(a,b){return b==="*"&&a.nodeType===1||!!a.nodeName&&a.nodeName.toLowerCase()===b},CLASS:function(a,b){return(" "+(a.className||a.getAttribute("class"))+" ").indexOf(b)>-1},ATTR:function(a,b){var c=b[1],d=m.attr?m.attr(a,c):o.attrHandle[c]?o.attrHandle[c](a):a[c]!=null?a[c]:a.getAttribute(c),e=d+"",f=b[2],g=b[4];return 
d==null?f==="!=":!f&&m.attr?d!=null:f==="="?e===g:f==="*="?e.indexOf(g)>=0:f==="~="?(" "+e+" ").indexOf(g)>=0:g?f==="!="?e!==g:f==="^="?e.indexOf(g)===0:f==="$="?e.substr(e.length-g.length)===g:f==="|="?e===g||e.substr(0,g.length+1)===g+"-":!1:e&&d!==!1},POS:function(a,b,c,d){var e=b[2],f=o.setFilters[e];if(f)return f(a,c,b,d)}}},p=o.match.POS,q=function(a,b){return"\\"+(b-0+1)};for(var r in o.match)o.match[r]=new RegExp(o.match[r].source+/(?![^\[]*\])(?![^\(]*\))/.source),o.leftMatch[r]=new RegExp(/(^(?:.|\r|\n)*?)/.source+o.match[r].source.replace(/\\(\d+)/g,q));var s=function(a,b){a=Array.prototype.slice.call(a,0);if(b){b.push.apply(b,a);return b}return a};try{Array.prototype.slice.call(c.documentElement.childNodes,0)[0].nodeType}catch(t){s=function(a,b){var c=0,d=b||[];if(g.call(a)==="[object Array]")Array.prototype.push.apply(d,a);else if(typeof a.length=="number")for(var e=a.length;c<e;c++)d.push(a[c]);else for(;a[c];c++)d.push(a[c]);return d}}var u,v;c.documentElement.compareDocumentPosition?u=function(a,b){if(a===b){h=!0;return 0}if(!a.compareDocumentPosition||!b.compareDocumentPosition)return a.compareDocumentPosition?-1:1;return a.compareDocumentPosition(b)&4?-1:1}:(u=function(a,b){if(a===b){h=!0;return 0}if(a.sourceIndex&&b.sourceIndex)return a.sourceIndex-b.sourceIndex;var c,d,e=[],f=[],g=a.parentNode,i=b.parentNode,j=g;if(g===i)return v(a,b);if(!g)return-1;if(!i)return 1;while(j)e.unshift(j),j=j.parentNode;j=i;while(j)f.unshift(j),j=j.parentNode;c=e.length,d=f.length;for(var k=0;k<c&&k<d;k++)if(e[k]!==f[k])return v(e[k],f[k]);return k===c?v(a,f[k],-1):v(e[k],b,1)},v=function(a,b,c){if(a===b)return c;var d=a.nextSibling;while(d){if(d===b)return-1;d=d.nextSibling}return 1}),function(){var a=c.createElement("div"),d="script"+(new Date).getTime(),e=c.documentElement;a.innerHTML="<a name='"+d+"'/>",e.insertBefore(a,e.firstChild),c.getElementById(d)&&(o.find.ID=function(a,c,d){if(typeof c.getElementById!="undefined"&&!d){var e=c.getElementById(a[1]);return e?e.id===a[1]||typeof e.getAttributeNode!="undefined"&&e.getAttributeNode("id").nodeValue===a[1]?[e]:b:[]}},o.filter.ID=function(a,b){var c=typeof a.getAttributeNode!="undefined"&&a.getAttributeNode("id");return a.nodeType===1&&c&&c.nodeValue===b}),e.removeChild(a),e=a=null}(),function(){var a=c.createElement("div");a.appendChild(c.createComment("")),a.getElementsByTagName("*").length>0&&(o.find.TAG=function(a,b){var c=b.getElementsByTagName(a[1]);if(a[1]==="*"){var d=[];for(var e=0;c[e];e++)c[e].nodeType===1&&d.push(c[e]);c=d}return c}),a.innerHTML="<a href='#'></a>",a.firstChild&&typeof a.firstChild.getAttribute!="undefined"&&a.firstChild.getAttribute("href")!=="#"&&(o.attrHandle.href=function(a){return a.getAttribute("href",2)}),a=null}(),c.querySelectorAll&&function(){var a=m,b=c.createElement("div"),d="__sizzle__";b.innerHTML="<p class='TEST'></p>";if(!b.querySelectorAll||b.querySelectorAll(".TEST").length!==0){m=function(b,e,f,g){e=e||c;if(!g&&!m.isXML(e)){var h=/^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec(b);if(h&&(e.nodeType===1||e.nodeType===9)){if(h[1])return s(e.getElementsByTagName(b),f);if(h[2]&&o.find.CLASS&&e.getElementsByClassName)return s(e.getElementsByClassName(h[2]),f)}if(e.nodeType===9){if(b==="body"&&e.body)return s([e.body],f);if(h&&h[3]){var i=e.getElementById(h[3]);if(!i||!i.parentNode)return s([],f);if(i.id===h[3])return s([i],f)}try{return s(e.querySelectorAll(b),f)}catch(j){}}else if(e.nodeType===1&&e.nodeName.toLowerCase()!=="object"){var 
k=e,l=e.getAttribute("id"),n=l||d,p=e.parentNode,q=/^\s*[+~]/.test(b);l?n=n.replace(/'/g,"\\$&"):e.setAttribute("id",n),q&&p&&(e=e.parentNode);try{if(!q||p)return s(e.querySelectorAll("[id='"+n+"'] "+b),f)}catch(r){}finally{l||k.removeAttribute("id")}}}return a(b,e,f,g)};for(var e in a)m[e]=a[e];b=null}}(),function(){var a=c.documentElement,b=a.matchesSelector||a.mozMatchesSelector||a.webkitMatchesSelector||a.msMatchesSelector;if(b){var d=!b.call(c.createElement("div"),"div"),e=!1;try{b.call(c.documentElement,"[test!='']:sizzle")}catch(f){e=!0}m.matchesSelector=function(a,c){c=c.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!m.isXML(a))try{if(e||!o.match.PSEUDO.test(c)&&!/!=/.test(c)){var f=b.call(a,c);if(f||!d||a.document&&a.document.nodeType!==11)return f}}catch(g){}return m(c,null,null,[a]).length>0}}}(),function(){var a=c.createElement("div");a.innerHTML="<div class='test e'></div><div class='test'></div>";if(!!a.getElementsByClassName&&a.getElementsByClassName("e").length!==0){a.lastChild.className="e";if(a.getElementsByClassName("e").length===1)return;o.order.splice(1,0,"CLASS"),o.find.CLASS=function(a,b,c){if(typeof b.getElementsByClassName!="undefined"&&!c)return b.getElementsByClassName(a[1])},a=null}}(),c.documentElement.contains?m.contains=function(a,b){return a!==b&&(a.contains?a.contains(b):!0)}:c.documentElement.compareDocumentPosition?m.contains=function(a,b){return!!(a.compareDocumentPosition(b)&16)}:m.contains=function(){return!1},m.isXML=function(a){var b=(a?a.ownerDocument||a:0).documentElement;return b?b.nodeName!=="HTML":!1};var y=function(a,b,c){var d,e=[],f="",g=b.nodeType?[b]:b;while(d=o.match.PSEUDO.exec(a))f+=d[0],a=a.replace(o.match.PSEUDO,"");a=o.relative[a]?a+"*":a;for(var h=0,i=g.length;h<i;h++)m(a,g[h],e,c);return m.filter(f,e)};m.attr=f.attr,m.selectors.attrMap={},f.find=m,f.expr=m.selectors,f.expr[":"]=f.expr.filters,f.unique=m.uniqueSort,f.text=m.getText,f.isXMLDoc=m.isXML,f.contains=m.contains}();var L=/Until$/,M=/^(?:parents|prevUntil|prevAll)/,N=/,/,O=/^.[^:#\[\.,]*$/,P=Array.prototype.slice,Q=f.expr.match.POS,R={children:!0,contents:!0,next:!0,prev:!0};f.fn.extend({find:function(a){var b=this,c,d;if(typeof a!="string")return f(a).filter(function(){for(c=0,d=b.length;c<d;c++)if(f.contains(b[c],this))return!0});var e=this.pushStack("","find",a),g,h,i;for(c=0,d=this.length;c<d;c++){g=e.length,f.find(a,this[c],e);if(c>0)for(h=g;h<e.length;h++)for(i=0;i<g;i++)if(e[i]===e[h]){e.splice(h--,1);break}}return e},has:function(a){var b=f(a);return this.filter(function(){for(var a=0,c=b.length;a<c;a++)if(f.contains(this,b[a]))return!0})},not:function(a){return this.pushStack(T(this,a,!1),"not",a)},filter:function(a){return this.pushStack(T(this,a,!0),"filter",a)},is:function(a){return!!a&&(typeof a=="string"?Q.test(a)?f(a,this.context).index(this[0])>=0:f.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c=[],d,e,g=this[0];if(f.isArray(a)){var h=1;while(g&&g.ownerDocument&&g!==b){for(d=0;d<a.length;d++)f(g).is(a[d])&&c.push({selector:a[d],elem:g,level:h});g=g.parentNode,h++}return c}var i=Q.test(a)||typeof a!="string"?f(a,b||this.context):0;for(d=0,e=this.length;d<e;d++){g=this[d];while(g){if(i?i.index(g)>-1:f.find.matchesSelector(g,a)){c.push(g);break}g=g.parentNode;if(!g||!g.ownerDocument||g===b||g.nodeType===11)break}}c=c.length>1?f.unique(c):c;return this.pushStack(c,"closest",a)},index:function(a){if(!a)return this[0]&&this[0].parentNode?this.prevAll().length:-1;if(typeof a=="string")return f.inArray(this[0],f(a));return 
f.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var c=typeof a=="string"?f(a,b):f.makeArray(a&&a.nodeType?[a]:a),d=f.merge(this.get(),c);return this.pushStack(S(c[0])||S(d[0])?d:f.unique(d))},andSelf:function(){return this.add(this.prevObject)}}),f.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return f.dir(a,"parentNode")},parentsUntil:function(a,b,c){return f.dir(a,"parentNode",c)},next:function(a){return f.nth(a,2,"nextSibling")},prev:function(a){return f.nth(a,2,"previousSibling")},nextAll:function(a){return f.dir(a,"nextSibling")},prevAll:function(a){return f.dir(a,"previousSibling")},nextUntil:function(a,b,c){return f.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return f.dir(a,"previousSibling",c)},siblings:function(a){return f.sibling(a.parentNode.firstChild,a)},children:function(a){return f.sibling(a.firstChild)},contents:function(a){return f.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:f.makeArray(a.childNodes)}},function(a,b){f.fn[a]=function(c,d){var e=f.map(this,b,c);L.test(a)||(d=c),d&&typeof d=="string"&&(e=f.filter(d,e)),e=this.length>1&&!R[a]?f.unique(e):e,(this.length>1||N.test(d))&&M.test(a)&&(e=e.reverse());return this.pushStack(e,a,P.call(arguments).join(","))}}),f.extend({filter:function(a,b,c){c&&(a=":not("+a+")");return b.length===1?f.find.matchesSelector(b[0],a)?[b[0]]:[]:f.find.matches(a,b)},dir:function(a,c,d){var e=[],g=a[c];while(g&&g.nodeType!==9&&(d===b||g.nodeType!==1||!f(g).is(d)))g.nodeType===1&&e.push(g),g=g[c];return e},nth:function(a,b,c,d){b=b||1;var e=0;for(;a;a=a[c])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var V="abbr|article|aside|audio|canvas|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",W=/ jQuery\d+="(?:\d+|null)"/g,X=/^\s+/,Y=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Z=/<([\w:]+)/,$=/<tbody/i,_=/<|&#?\w+;/,ba=/<(?:script|style)/i,bb=/<(?:script|object|embed|option|style)/i,bc=new RegExp("<(?:"+V+")","i"),bd=/checked\s*(?:[^=]|=\s*.checked.)/i,be=/\/(java|ecma)script/i,bf=/^\s*<!(?:\[CDATA\[|\-\-)/,bg={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]},bh=U(c);bg.optgroup=bg.option,bg.tbody=bg.tfoot=bg.colgroup=bg.caption=bg.thead,bg.th=bg.td,f.support.htmlSerialize||(bg._default=[1,"div<div>","</div>"]),f.fn.extend({text:function(a){if(f.isFunction(a))return this.each(function(b){var c=f(this);c.text(a.call(this,b,c.text()))});if(typeof a!="object"&&a!==b)return this.empty().append((this[0]&&this[0].ownerDocument||c).createTextNode(a));return f.text(this)},wrapAll:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapAll(a.call(this,b))});if(this[0]){var b=f(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapInner(a.call(this,b))});return this.each(function(){var 
b=f(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=f.isFunction(a);return this.each(function(c){f(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){f.nodeName(this,"body")||f(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=f.clean(arguments);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,f.clean(arguments));return a}},remove:function(a,b){for(var c=0,d;(d=this[c])!=null;c++)if(!a||f.filter(a,[d]).length)!b&&d.nodeType===1&&(f.cleanData(d.getElementsByTagName("*")),f.cleanData([d])),d.parentNode&&d.parentNode.removeChild(d);return this},empty:function()
{for(var a=0,b;(b=this[a])!=null;a++){b.nodeType===1&&f.cleanData(b.getElementsByTagName("*"));while(b.firstChild)b.removeChild(b.firstChild)}return this},clone:function(a,b){a=a==null?!1:a,b=b==null?a:b;return this.map(function(){return f.clone(this,a,b)})},html:function(a){if(a===b)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(W,""):null;if(typeof a=="string"&&!ba.test(a)&&(f.support.leadingWhitespace||!X.test(a))&&!bg[(Z.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Y,"<$1></$2>");try{for(var c=0,d=this.length;c<d;c++)this[c].nodeType===1&&(f.cleanData(this[c].getElementsByTagName("*")),this[c].innerHTML=a)}catch(e){this.empty().append(a)}}else f.isFunction(a)?this.each(function(b){var c=f(this);c.html(a.call(this,b,c.html()))}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(f.isFunction(a))return this.each(function(b){var c=f(this),d=c.html();c.replaceWith(a.call(this,b,d))});typeof a!="string"&&(a=f(a).detach());return this.each(function(){var b=this.nextSibling,c=this.parentNode;f(this).remove(),b?f(b).before(a):f(c).append(a)})}return this.length?this.pushStack(f(f.isFunction(a)?a():a),"replaceWith",a):this},detach:function(a){return this.remove(a,!0)},domManip:function(a,c,d){var e,g,h,i,j=a[0],k=[];if(!f.support.checkClone&&arguments.length===3&&typeof j=="string"&&bd.test(j))return this.each(function(){f(this).domManip(a,c,d,!0)});if(f.isFunction(j))return this.each(function(e){var g=f(this);a[0]=j.call(this,e,c?g.html():b),g.domManip(a,c,d)});if(this[0]){i=j&&j.parentNode,f.support.parentNode&&i&&i.nodeType===11&&i.childNodes.length===this.length?e={fragment:i}:e=f.buildFragment(a,this,k),h=e.fragment,h.childNodes.length===1?g=h=h.firstChild:g=h.firstChild;if(g){c=c&&f.nodeName(g,"tr");for(var l=0,m=this.length,n=m-1;l<m;l++)d.call(c?bi(this[l],g):this[l],e.cacheable||m>1&&l<n?f.clone(h,!0,!0):h)}k.length&&f.each(k,bp)}return this}}),f.buildFragment=function(a,b,d){var e,g,h,i,j=a[0];b&&b[0]&&(i=b[0].ownerDocument||b[0]),i.createDocumentFragment||(i=c),a.length===1&&typeof j=="string"&&j.length<512&&i===c&&j.charAt(0)==="<"&&!bb.test(j)&&(f.support.checkClone||!bd.test(j))&&(f.support.html5Clone||!bc.test(j))&&(g=!0,h=f.fragments[j],h&&h!==1&&(e=h)),e||(e=i.createDocumentFragment(),f.clean(a,i,e,d)),g&&(f.fragments[j]=h?e:1);return{fragment:e,cacheable:g}},f.fragments={},f.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){f.fn[a]=function(c){var d=[],e=f(c),g=this.length===1&&this[0].parentNode;if(g&&g.nodeType===11&&g.childNodes.length===1&&e.length===1){e[b](this[0]);return this}for(var h=0,i=e.length;h<i;h++){var j=(h>0?this.clone(!0):this).get();f(e[h])[b](j),d=d.concat(j)}return this.pushStack(d,a,e.selector)}}),f.extend({clone:function(a,b,c){var d,e,g,h=f.support.html5Clone||!bc.test("<"+a.nodeName)?a.cloneNode(!0):bo(a);if((!f.support.noCloneEvent||!f.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!f.isXMLDoc(a)){bk(a,h),d=bl(a),e=bl(h);for(g=0;d[g];++g)e[g]&&bk(d[g],e[g])}if(b){bj(a,h);if(c){d=bl(a),e=bl(h);for(g=0;d[g];++g)bj(d[g],e[g])}}d=e=null;return h},clean:function(a,b,d,e){var g;b=b||c,typeof b.createElement=="undefined"&&(b=b.ownerDocument||b[0]&&b[0].ownerDocument||c);var h=[],i;for(var j=0,k;(k=a[j])!=null;j++){typeof k=="number"&&(k+="");if(!k)continue;if(typeof k=="string")if(!_.test(k))k=b.createTextNode(k);else{k=k.replace(Y,"<$1></$2>");var 
l=(Z.exec(k)||["",""])[1].toLowerCase(),m=bg[l]||bg._default,n=m[0],o=b.createElement("div");b===c?bh.appendChild(o):U(b).appendChild(o),o.innerHTML=m[1]+k+m[2];while(n--)o=o.lastChild;if(!f.support.tbody){var p=$.test(k),q=l==="table"&&!p?o.firstChild&&o.firstChild.childNodes:m[1]==="<table>"&&!p?o.childNodes:[];for(i=q.length-1;i>=0;--i)f.nodeName(q[i],"tbody")&&!q[i].childNodes.length&&q[i].parentNode.removeChild(q[i])}!f.support.leadingWhitespace&&X.test(k)&&o.insertBefore(b.createTextNode(X.exec(k)[0]),o.firstChild),k=o.childNodes}var r;if(!f.support.appendChecked)if(k[0]&&typeof (r=k.length)=="number")for(i=0;i<r;i++)bn(k[i]);else bn(k);k.nodeType?h.push(k):h=f.merge(h,k)}if(d){g=function(a){return!a.type||be.test(a.type)};for(j=0;h[j];j++)if(e&&f.nodeName(h[j],"script")&&(!h[j].type||h[j].type.toLowerCase()==="text/javascript"))e.push(h[j].parentNode?h[j].parentNode.removeChild(h[j]):h[j]);else{if(h[j].nodeType===1){var s=f.grep(h[j].getElementsByTagName("script"),g);h.splice.apply(h,[j+1,0].concat(s))}d.appendChild(h[j])}}return h},cleanData:function(a){var b,c,d=f.cache,e=f.event.special,g=f.support.deleteExpando;for(var h=0,i;(i=a[h])!=null;h++){if(i.nodeName&&f.noData[i.nodeName.toLowerCase()])continue;c=i[f.expando];if(c){b=d[c];if(b&&b.events){for(var j in b.events)e[j]?f.event.remove(i,j):f.removeEvent(i,j,b.handle);b.handle&&(b.handle.elem=null)}g?delete i[f.expando]:i.removeAttribute&&i.removeAttribute(f.expando),delete d[c]}}}});var bq=/alpha\([^)]*\)/i,br=/opacity=([^)]*)/,bs=/([A-Z]|^ms)/g,bt=/^-?\d+(?:px)?$/i,bu=/^-?\d/,bv=/^([\-+])=([\-+.\de]+)/,bw={position:"absolute",visibility:"hidden",display:"block"},bx=["Left","Right"],by=["Top","Bottom"],bz,bA,bB;f.fn.css=function(a,c){if(arguments.length===2&&c===b)return this;return f.access(this,a,c,!0,function(a,c,d){return d!==b?f.style(a,c,d):f.css(a,c)})},f.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=bz(a,"opacity","opacity");return c===""?"1":c}return a.style.opacity}}},cssNumber:{fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":f.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,c,d,e){if(!!a&&a.nodeType!==3&&a.nodeType!==8&&!!a.style){var g,h,i=f.camelCase(c),j=a.style,k=f.cssHooks[i];c=f.cssProps[i]||i;if(d===b){if(k&&"get"in k&&(g=k.get(a,!1,e))!==b)return g;return j[c]}h=typeof d,h==="string"&&(g=bv.exec(d))&&(d=+(g[1]+1)*+g[2]+parseFloat(f.css(a,c)),h="number");if(d==null||h==="number"&&isNaN(d))return;h==="number"&&!f.cssNumber[i]&&(d+="px");if(!k||!("set"in k)||(d=k.set(a,d))!==b)try{j[c]=d}catch(l){}}},css:function(a,c,d){var e,g;c=f.camelCase(c),g=f.cssHooks[c],c=f.cssProps[c]||c,c==="cssFloat"&&(c="float");if(g&&"get"in g&&(e=g.get(a,!0,d))!==b)return e;if(bz)return bz(a,c)},swap:function(a,b,c){var d={};for(var e in b)d[e]=a.style[e],a.style[e]=b[e];c.call(a);for(e in b)a.style[e]=d[e]}}),f.curCSS=f.css,f.each(["height","width"],function(a,b){f.cssHooks[b]={get:function(a,c,d){var e;if(c){if(a.offsetWidth!==0)return bC(a,b,d);f.swap(a,bw,function(){e=bC(a,b,d)});return e}},set:function(a,b){if(!bt.test(b))return b;b=parseFloat(b);if(b>=0)return b+"px"}}}),f.support.opacity||(f.cssHooks.opacity={get:function(a,b){return br.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var 
c=a.style,d=a.currentStyle,e=f.isNumeric(b)?"alpha(opacity="+b*100+")":"",g=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&f.trim(g.replace(bq,""))===""){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bq.test(g)?g.replace(bq,e):g+" "+e}}),f(function(){f.support.reliableMarginRight||(f.cssHooks.marginRight={get:function(a,b){var c;f.swap(a,{display:"inline-block"},function(){b?c=bz(a,"margin-right","marginRight"):c=a.style.marginRight});return c}})}),c.defaultView&&c.defaultView.getComputedStyle&&(bA=function(a,b){var c,d,e;b=b.replace(bs,"-$1").toLowerCase(),(d=a.ownerDocument.defaultView)&&(e=d.getComputedStyle(a,null))&&(c=e.getPropertyValue(b),c===""&&!f.contains(a.ownerDocument.documentElement,a)&&(c=f.style(a,b)));return c}),c.documentElement.currentStyle&&(bB=function(a,b){var c,d,e,f=a.currentStyle&&a.currentStyle[b],g=a.style;f===null&&g&&(e=g[b])&&(f=e),!bt.test(f)&&bu.test(f)&&(c=g.left,d=a.runtimeStyle&&a.runtimeStyle.left,d&&(a.runtimeStyle.left=a.currentStyle.left),g.left=b==="fontSize"?"1em":f||0,f=g.pixelLeft+"px",g.left=c,d&&(a.runtimeStyle.left=d));return f===""?"auto":f}),bz=bA||bB,f.expr&&f.expr.filters&&(f.expr.filters.hidden=function(a){var b=a.offsetWidth,c=a.offsetHeight;return b===0&&c===0||!f.support.reliableHiddenOffsets&&(a.style&&a.style.display||f.css(a,"display"))==="none"},f.expr.filters.visible=function(a){return!f.expr.filters.hidden(a)});var bD=/%20/g,bE=/\[\]$/,bF=/\r?\n/g,bG=/#.*$/,bH=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,bI=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,bJ=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,bK=/^(?:GET|HEAD)$/,bL=/^\/\//,bM=/\?/,bN=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,bO=/^(?:select|textarea)/i,bP=/\s+/,bQ=/([?&])_=[^&]*/,bR=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,bS=f.fn.load,bT={},bU={},bV,bW,bX=["*/"]+["*"];try{bV=e.href}catch(bY){bV=c.createElement("a"),bV.href="",bV=bV.href}bW=bR.exec(bV.toLowerCase())||[],f.fn.extend({load:function(a,c,d){if(typeof a!="string"&&bS)return bS.apply(this,arguments);if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var g=a.slice(e,a.length);a=a.slice(0,e)}var h="GET";c&&(f.isFunction(c)?(d=c,c=b):typeof c=="object"&&(c=f.param(c,f.ajaxSettings.traditional),h="POST"));var i=this;f.ajax({url:a,type:h,dataType:"html",data:c,complete:function(a,b,c){c=a.responseText,a.isResolved()&&(a.done(function(a){c=a}),i.html(g?f("<div>").append(c.replace(bN,"")).find(g):c)),d&&i.each(d,[c,b,a])}});return this},serialize:function(){return f.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?f.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||bO.test(this.nodeName)||bI.test(this.type))}).map(function(a,b){var c=f(this).val();return c==null?null:f.isArray(c)?f.map(c,function(a,c){return{name:b.name,value:a.replace(bF,"\r\n")}}):{name:b.name,value:c.replace(bF,"\r\n")}}).get()}}),f.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){f.fn[b]=function(a){return this.on(b,a)}}),f.each(["get","post"],function(a,c){f[c]=function(a,d,e,g){f.isFunction(d)&&(g=g||e,e=d,d=b);return f.ajax({type:c,url:a,data:d,success:e,dataType:g})}}),f.extend({getScript:function(a,c){return f.get(a,b,c,"script")},getJSON:function(a,b,c){return f.get(a,b,c,"json")},ajaxSetup:function(a,b){b?b_(a,f.ajaxSettings):(b=a,a=f.ajaxSettings),b_(a,b);return 
a},ajaxSettings:{url:bV,isLocal:bJ.test(bW[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":bX},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":f.parseJSON,"text xml":f.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:bZ(bT),ajaxTransport:bZ(bU),ajax:function(a,c){function w(a,c,l,m){if(s!==2){s=2,q&&clearTimeout(q),p=b,n=m||"",v.readyState=a>0?4:0;var o,r,u,w=c,x=l?cb(d,v,l):b,y,z;if(a>=200&&a<300||a===304){if(d.ifModified){if(y=v.getResponseHeader("Last-Modified"))f.lastModified[k]=y;if(z=v.getResponseHeader("Etag"))f.etag[k]=z}if(a===304)w="notmodified",o=!0;else try{r=cc(d,x),w="success",o=!0}catch(A){w="parsererror",u=A}}else{u=w;if(!w||a)w="error",a<0&&(a=0)}v.status=a,v.statusText=""+(c||w),o?h.resolveWith(e,[r,w,v]):h.rejectWith(e,[v,w,u]),v.statusCode(j),j=b,t&&g.trigger("ajax"+(o?"Success":"Error"),[v,d,o?r:u]),i.fireWith(e,[v,w]),t&&(g.trigger("ajaxComplete",[v,d]),--f.active||f.event.trigger("ajaxStop"))}}typeof a=="object"&&(c=a,a=b),c=c||{};var d=f.ajaxSetup({},c),e=d.context||d,g=e!==d&&(e.nodeType||e instanceof f)?f(e):f.event,h=f.Deferred(),i=f.Callbacks("once memory"),j=d.statusCode||{},k,l={},m={},n,o,p,q,r,s=0,t,u,v={readyState:0,setRequestHeader:function(a,b){if(!s){var c=a.toLowerCase();a=m[c]=m[c]||a,l[a]=b}return this},getAllResponseHeaders:function(){return s===2?n:null},getResponseHeader:function(a){var c;if(s===2){if(!o){o={};while(c=bH.exec(n))o[c[1].toLowerCase()]=c[2]}c=o[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){s||(d.mimeType=a);return this},abort:function(a){a=a||"abort",p&&p.abort(a),w(0,a);return this}};h.promise(v),v.success=v.done,v.error=v.fail,v.complete=i.add,v.statusCode=function(a){if(a){var b;if(s<2)for(b in a)j[b]=[j[b],a[b]];else b=a[v.status],v.then(b,b)}return this},d.url=((a||d.url)+"").replace(bG,"").replace(bL,bW[1]+"//"),d.dataTypes=f.trim(d.dataType||"*").toLowerCase().split(bP),d.crossDomain==null&&(r=bR.exec(d.url.toLowerCase()),d.crossDomain=!(!r||r[1]==bW[1]&&r[2]==bW[2]&&(r[3]||(r[1]==="http:"?80:443))==(bW[3]||(bW[1]==="http:"?80:443)))),d.data&&d.processData&&typeof d.data!="string"&&(d.data=f.param(d.data,d.traditional)),b$(bT,d,c,v);if(s===2)return!1;t=d.global,d.type=d.type.toUpperCase(),d.hasContent=!bK.test(d.type),t&&f.active++===0&&f.event.trigger("ajaxStart");if(!d.hasContent){d.data&&(d.url+=(bM.test(d.url)?"&":"?")+d.data,delete d.data),k=d.url;if(d.cache===!1){var x=f.now(),y=d.url.replace(bQ,"$1_="+x);d.url=y+(y===d.url?(bM.test(d.url)?"&":"?")+"_="+x:"")}}(d.data&&d.hasContent&&d.contentType!==!1||c.contentType)&&v.setRequestHeader("Content-Type",d.contentType),d.ifModified&&(k=k||d.url,f.lastModified[k]&&v.setRequestHeader("If-Modified-Since",f.lastModified[k]),f.etag[k]&&v.setRequestHeader("If-None-Match",f.etag[k])),v.setRequestHeader("Accept",d.dataTypes[0]&&d.accepts[d.dataTypes[0]]?d.accepts[d.dataTypes[0]]+(d.dataTypes[0]!=="*"?", "+bX+"; q=0.01":""):d.accepts["*"]);for(u in d.headers)v.setRequestHeader(u,d.headers[u]);if(d.beforeSend&&(d.beforeSend.call(e,v,d)===!1||s===2)){v.abort();return!1}for(u in{success:1,error:1,complete:1})v[u](d[u]);p=b$(bU,d,c,v);if(!p)w(-1,"No 
Transport");else{v.readyState=1,t&&g.trigger("ajaxSend",[v,d]),d.async&&d.timeout>0&&(q=setTimeout(function(){v.abort("timeout")},d.timeout));try{s=1,p.send(l,w)}catch(z){if(s<2)w(-1,z);else throw z}}return v},param:function(a,c){var d=[],e=function(a,b){b=f.isFunction(b)?b():b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=f.ajaxSettings.traditional);if(f.isArray(a)||a.jquery&&!f.isPlainObject(a))f.each(a,function(){e(this.name,this.value)});else for(var g in a)ca(g,a[g],c,e);return d.join("&").replace(bD,"+")}}),f.extend({active:0,lastModified:{},etag:{}});var cd=f.now(),ce=/(\=)\?(&|$)|\?\?/i;f.ajaxSetup({jsonp:"callback",jsonpCallback:function(){return f.expando+"_"+cd++}}),f.ajaxPrefilter("json jsonp",function(b,c,d){var e=b.contentType==="application/x-www-form-urlencoded"&&typeof b.data=="string";if(b.dataTypes[0]==="jsonp"||b.jsonp!==!1&&(ce.test(b.url)||e&&ce.test(b.data))){var g,h=b.jsonpCallback=f.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,i=a[h],j=b.url,k=b.data,l="$1"+h+"$2";b.jsonp!==!1&&(j=j.replace(ce,l),b.url===j&&(e&&(k=k.replace(ce,l)),b.data===k&&(j+=(/\?/.test(j)?"&":"?")+b.jsonp+"="+h))),b.url=j,b.data=k,a[h]=function(a){g=[a]},d.always(function(){a[h]=i,g&&f.isFunction(i)&&a[h](g[0])}),b.converters["script json"]=function(){g||f.error(h+" was not called");return g[0]},b.dataTypes[0]="json";return"script"}}),f.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){f.globalEval(a);return a}}}),f.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),f.ajaxTransport("script",function(a){if(a.crossDomain){var d,e=c.head||c.getElementsByTagName("head")[0]||c.documentElement;return{send:function(f,g){d=c.createElement("script"),d.async="async",a.scriptCharset&&(d.charset=a.scriptCharset),d.src=a.url,d.onload=d.onreadystatechange=function(a,c){if(c||!d.readyState||/loaded|complete/.test(d.readyState))d.onload=d.onreadystatechange=null,e&&d.parentNode&&e.removeChild(d),d=b,c||g(200,"success")},e.insertBefore(d,e.firstChild)},abort:function(){d&&d.onload(0,1)}}}});var cf=a.ActiveXObject?function(){for(var a in ch)ch[a](0,1)}:!1,cg=0,ch;f.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&ci()||cj()}:ci,function(a){f.extend(f.support,{ajax:!!a,cors:!!a&&"withCredentials"in a})}(f.ajaxSettings.xhr()),f.support.ajax&&f.ajaxTransport(function(c){if(!c.crossDomain||f.support.cors){var d;return{send:function(e,g){var h=c.xhr(),i,j;c.username?h.open(c.type,c.url,c.async,c.username,c.password):h.open(c.type,c.url,c.async);if(c.xhrFields)for(j in c.xhrFields)h[j]=c.xhrFields[j];c.mimeType&&h.overrideMimeType&&h.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(j in e)h.setRequestHeader(j,e[j])}catch(k){}h.send(c.hasContent&&c.data||null),d=function(a,e){var j,k,l,m,n;try{if(d&&(e||h.readyState===4)){d=b,i&&(h.onreadystatechange=f.noop,cf&&delete 
ch[i]);if(e)h.readyState!==4&&h.abort();else{j=h.status,l=h.getAllResponseHeaders(),m={},n=h.responseXML,n&&n.documentElement&&(m.xml=n),m.text=h.responseText;try{k=h.statusText}catch(o){k=""}!j&&c.isLocal&&!c.crossDomain?j=m.text?200:404:j===1223&&(j=204)}}}catch(p){e||g(-1,p)}m&&g(j,k,m,l)},!c.async||h.readyState===4?d():(i=++cg,cf&&(ch||(ch={},f(a).unload(cf)),ch[i]=d),h.onreadystatechange=d)},abort:function(){d&&d(0,1)}}}});var ck={},cl,cm,cn=/^(?:toggle|show|hide)$/,co=/^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,cp,cq=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]],cr;f.fn.extend({show:function(a,b,c){var d,e;if(a||a===0)return this.animate(cu("show",3),a,b,c);for(var g=0,h=this.length;g<h;g++)d=this[g],d.style&&(e=d.style.display,!f._data(d,"olddisplay")&&e==="none"&&(e=d.style.display=""),e===""&&f.css(d,"display")==="none"&&f._data(d,"olddisplay",cv(d.nodeName)));for(g=0;g<h;g++){d=this[g];if(d.style){e=d.style.display;if(e===""||e==="none")d.style.display=f._data(d,"olddisplay")||""}}return this},hide:function(a,b,c){if(a||a===0)return this.animate(cu("hide",3),a,b,c);var d,e,g=0,h=this.length;for(;g<h;g++)d=this[g],d.style&&(e=f.css(d,"display"),e!=="none"&&!f._data(d,"olddisplay")&&f._data(d,"olddisplay",e));for(g=0;g<h;g++)this[g].style&&(this[g].style.display="none");return this},_toggle:f.fn.toggle,toggle:function(a,b,c){var d=typeof a=="boolean";f.isFunction(a)&&f.isFunction(b)?this._toggle.apply(this,arguments):a==null||d?this.each(function(){var b=d?a:f(this).is(":hidden");f(this)[b?"show":"hide"]()}):this.animate(cu("toggle",3),a,b,c);return this},fadeTo:function(a,b,c,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){function g(){e.queue===!1&&f._mark(this);var b=f.extend({},e),c=this.nodeType===1,d=c&&f(this).is(":hidden"),g,h,i,j,k,l,m,n,o;b.animatedProperties={};for(i in a){g=f.camelCase(i),i!==g&&(a[g]=a[i],delete a[i]),h=a[g],f.isArray(h)?(b.animatedProperties[g]=h[1],h=a[g]=h[0]):b.animatedProperties[g]=b.specialEasing&&b.specialEasing[g]||b.easing||"swing";if(h==="hide"&&d||h==="show"&&!d)return b.complete.call(this);c&&(g==="height"||g==="width")&&(b.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY],f.css(this,"display")==="inline"&&f.css(this,"float")==="none"&&(!f.support.inlineBlockNeedsLayout||cv(this.nodeName)==="inline"?this.style.display="inline-block":this.style.zoom=1))}b.overflow!=null&&(this.style.overflow="hidden");for(i in a)j=new f.fx(this,b,i),h=a[i],cn.test(h)?(o=f._data(this,"toggle"+i)||(h==="toggle"?d?"show":"hide":0),o?(f._data(this,"toggle"+i,o==="show"?"hide":"show"),j[o]()):j[h]()):(k=co.exec(h),l=j.cur(),k?(m=parseFloat(k[2]),n=k[3]||(f.cssNumber[i]?"":"px"),n!=="px"&&(f.style(this,i,(m||1)+n),l=(m||1)/j.cur()*l,f.style(this,i,l+n)),k[1]&&(m=(k[1]==="-="?-1:1)*m+l),j.custom(l,m,n)):j.custom(l,h,""));return!0}var e=f.speed(b,c,d);if(f.isEmptyObject(a))return this.each(e.complete,[!1]);a=f.extend({},a);return e.queue===!1?this.each(g):this.queue(e.queue,g)},stop:function(a,c,d){typeof a!="string"&&(d=c,c=a,a=b),c&&a!==!1&&this.queue(a||"fx",[]);return this.each(function(){function h(a,b,c){var e=b[c];f.removeData(a,c,!0),e.stop(d)}var b,c=!1,e=f.timers,g=f._data(this);d||f._unmark(!0,this);if(a==null)for(b in g)g[b]&&g[b].stop&&b.indexOf(".run")===b.length-4&&h(this,g,b);else 
g[b=a+".run"]&&g[b].stop&&h(this,g,b);for(b=e.length;b--;)e[b].elem===this&&(a==null||e[b].queue===a)&&(d?e[b](!0):e[b].saveState(),c=!0,e.splice(b,1));(!d||!c)&&f.dequeue(this,a)})}}),f.each({slideDown:cu("show",1),slideUp:cu("hide",1),slideToggle:cu("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){f.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),f.extend({speed:function(a,b,c){var d=a&&typeof a=="object"?f.extend({},a):{complete:c||!c&&b||f.isFunction(a)&&a,duration:a,easing:c&&b||b&&!f.isFunction(b)&&b};d.duration=f.fx.off?0:typeof d.duration=="number"?d.duration:d.duration in f.fx.speeds?f.fx.speeds[d.duration]:f.fx.speeds._default;if(d.queue==null||d.queue===!0)d.queue="fx";d.old=d.complete,d.complete=function(a){f.isFunction(d.old)&&d.old.call(this),d.queue?f.dequeue(this,d.queue):a!==!1&&f._unmark(this)};return d},easing:{linear:function(a,b,c,d){return c+d*a},swing:function(a,b,c,d){return(-Math.cos(a*Math.PI)/2+.5)*d+c}},timers:[],fx:function(a,b,c){this.options=b,this.elem=a,this.prop=c,b.orig=b.orig||{}}}),f.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this),(f.fx.step[this.prop]||f.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a,b=f.css(this.elem,this.prop);return isNaN(a=parseFloat(b))?!b||b==="auto"?0:b:a},custom:function(a,c,d){function
|
(a){return e.step(a)}var e=this,g=f.fx;this.startTime=cr||cs(),this.end=c,this.now=this.start=a,this.pos=this.state=0,this.unit=d||this.unit||(f.cssNumber[this.prop]?"":"px"),h.queue=this.options.queue,h.elem=this.elem,h.saveState=function(){e.options.hide&&f._data(e.elem,"fxshow"+e.prop)===b&&f._data(e.elem,"fxshow"+e.prop,e.start)},h()&&f.timers.push(h)&&!cp&&(cp=setInterval(g.tick,g.interval))},show:function(){var a=f._data(this.elem,"fxshow"+this.prop);this.options.orig[this.prop]=a||f.style(this.elem,this.prop),this.options.show=!0,a!==b?this.custom(this.cur(),a):this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur()),f(this.elem).show()},hide:function(){this.options.orig[this.prop]=f._data(this.elem,"fxshow"+this.prop)||f.style(this.elem,this.prop),this.options.hide=!0,this.custom(this.cur(),0)},step:function(a){var b,c,d,e=cr||cs(),g=!0,h=this.elem,i=this.options;if(a||e>=i.duration+this.startTime){this.now=this.end,this.pos=this.state=1,this.update(),i.animatedProperties[this.prop]=!0;for(b in i.animatedProperties)i.animatedProperties[b]!==!0&&(g=!1);if(g){i.overflow!=null&&!f.support.shrinkWrapBlocks&&f.each(["","X","Y"],function(a,b){h.style["overflow"+b]=i.overflow[a]}),i.hide&&f(h).hide();if(i.hide||i.show)for(b in i.animatedProperties)f.style(h,b,i.orig[b]),f.removeData(h,"fxshow"+b,!0),f.removeData(h,"toggle"+b,!0);d=i.complete,d&&(i.complete=!1,d.call(h))}return!1}i.duration==Infinity?this.now=e:(c=e-this.startTime,this.state=c/i.duration,this.pos=f.easing[i.animatedProperties[this.prop]](this.state,c,0,1,i.duration),this.now=this.start+(this.end-this.start)*this.pos),this.update();return!0}},f.extend(f.fx,{tick:function(){var a,b=f.timers,c=0;for(;c<b.length;c++)a=b[c],!a()&&b[c]===a&&b.splice(c--,1);b.length||f.fx.stop()},interval:13,stop:function(){clearInterval(cp),cp=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){f.style(a.elem,"opacity",a.now)},_default:function(a){a.elem.style&&a.elem.style[a.prop]!=null?a.elem.style[a.prop]=a.now+a.unit:a.elem[a.prop]=a.now}}}),f.each(["width","height"],function(a,b){f.fx.step[b]=function(a){f.style(a.elem,b,Math.max(0,a.now)+a.unit)}}),f.expr&&f.expr.filters&&(f.expr.filters.animated=function(a){return f.grep(f.timers,function(b){return a===b.elem}).length});var cw=/^t(?:able|d|h)$/i,cx=/^(?:body|html)$/i;"getBoundingClientRect"in c.documentElement?f.fn.offset=function(a){var b=this[0],c;if(a)return this.each(function(b){f.offset.setOffset(this,a,b)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return f.offset.bodyOffset(b);try{c=b.getBoundingClientRect()}catch(d){}var e=b.ownerDocument,g=e.documentElement;if(!c||!f.contains(g,b))return c?{top:c.top,left:c.left}:{top:0,left:0};var h=e.body,i=cy(e),j=g.clientTop||h.clientTop||0,k=g.clientLeft||h.clientLeft||0,l=i.pageYOffset||f.support.boxModel&&g.scrollTop||h.scrollTop,m=i.pageXOffset||f.support.boxModel&&g.scrollLeft||h.scrollLeft,n=c.top+l-j,o=c.left+m-k;return{top:n,left:o}}:f.fn.offset=function(a){var b=this[0];if(a)return this.each(function(b){f.offset.setOffset(this,a,b)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return f.offset.bodyOffset(b);var 
c,d=b.offsetParent,e=b,g=b.ownerDocument,h=g.documentElement,i=g.body,j=g.defaultView,k=j?j.getComputedStyle(b,null):b.currentStyle,l=b.offsetTop,m=b.offsetLeft;while((b=b.parentNode)&&b!==i&&b!==h){if(f.support.fixedPosition&&k.position==="fixed")break;c=j?j.getComputedStyle(b,null):b.currentStyle,l-=b.scrollTop,m-=b.scrollLeft,b===d&&(l+=b.offsetTop,m+=b.offsetLeft,f.support.doesNotAddBorder&&(!f.support.doesAddBorderForTableAndCells||!cw.test(b.nodeName))&&(l+=parseFloat(c.borderTopWidth)||0,m+=parseFloat(c.borderLeftWidth)||0),e=d,d=b.offsetParent),f.support.subtractsBorderForOverflowNotVisible&&c.overflow!=="visible"&&(l+=parseFloat(c.borderTopWidth)||0,m+=parseFloat(c.borderLeftWidth)||0),k=c}if(k.position==="relative"||k.position==="static")l+=i.offsetTop,m+=i.offsetLeft;f.support.fixedPosition&&k.position==="fixed"&&(l+=Math.max(h.scrollTop,i.scrollTop),m+=Math.max(h.scrollLeft,i.scrollLeft));return{top:l,left:m}},f.offset={bodyOffset:function(a){var b=a.offsetTop,c=a.offsetLeft;f.support.doesNotIncludeMarginInBodyOffset&&(b+=parseFloat(f.css(a,"marginTop"))||0,c+=parseFloat(f.css(a,"marginLeft"))||0);return{top:b,left:c}},setOffset:function(a,b,c){var d=f.css(a,"position");d==="static"&&(a.style.position="relative");var e=f(a),g=e.offset(),h=f.css(a,"top"),i=f.css(a,"left"),j=(d==="absolute"||d==="fixed")&&f.inArray("auto",[h,i])>-1,k={},l={},m,n;j?(l=e.position(),m=l.top,n=l.left):(m=parseFloat(h)||0,n=parseFloat(i)||0),f.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):e.css(k)}},f.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),c=this.offset(),d=cx.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(f.css(a,"marginTop"))||0,c.left-=parseFloat(f.css(a,"marginLeft"))||0,d.top+=parseFloat(f.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(f.css(b[0],"borderLeftWidth"))||0;return{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||c.body;while(a&&!cx.test(a.nodeName)&&f.css(a,"position")==="static")a=a.offsetParent;return a})}}),f.each(["Left","Top"],function(a,c){var d="scroll"+c;f.fn[d]=function(c){var e,g;if(c===b){e=this[0];if(!e)return null;g=cy(e);return g?"pageXOffset"in g?g[a?"pageYOffset":"pageXOffset"]:f.support.boxModel&&g.document.documentElement[d]||g.document.body[d]:e[d]}return this.each(function(){g=cy(this),g?g.scrollTo(a?f(g).scrollLeft():c,a?c:f(g).scrollTop()):this[d]=c})}}),f.each(["Height","Width"],function(a,c){var d=c.toLowerCase();f.fn["inner"+c]=function(){var a=this[0];return a?a.style?parseFloat(f.css(a,d,"padding")):this[d]():null},f.fn["outer"+c]=function(a){var b=this[0];return b?b.style?parseFloat(f.css(b,d,a?"margin":"border")):this[d]():null},f.fn[d]=function(a){var e=this[0];if(!e)return a==null?null:this;if(f.isFunction(a))return this.each(function(b){var c=f(this);c[d](a.call(this,b,c[d]()))});if(f.isWindow(e)){var g=e.document.documentElement["client"+c],h=e.document.body;return e.document.compatMode==="CSS1Compat"&&g||h&&h["client"+c]||g}if(e.nodeType===9)return Math.max(e.documentElement["client"+c],e.body["scroll"+c],e.documentElement["scroll"+c],e.body["offset"+c],e.documentElement["offset"+c]);if(a===b){var i=f.css(e,d),j=parseFloat(i);return f.isNumeric(j)?j:i}return this.css(d,typeof a=="string"?a:a+"px")}}),a.jQuery=a.$=f,typeof define=="function"&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return f})})(window);
|
h
|
admin.rs
|
//! Admin client.
//!
//! The main object is the [`AdminClient`] struct.
//!
//! [`AdminClient`]: struct.AdminClient.html
use std::collections::HashMap;
use std::ffi::{c_void, CStr, CString};
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::thread::{self, JoinHandle};
use std::time::Duration;
use futures::channel::oneshot;
use futures::future::{self, Either, FutureExt};
use futures::ready;
use log::{trace, warn};
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::client::{Client, ClientContext, DefaultClientContext, NativeQueue};
use crate::config::{ClientConfig, FromClientConfig, FromClientConfigAndContext};
use crate::error::{IsError, KafkaError, KafkaResult};
use crate::util::{cstr_to_owned, AsCArray, ErrBuf, IntoOpaque, KafkaDrop, NativePtr, Timeout};
//
// ********** ADMIN CLIENT **********
//
/// A client for the Kafka admin API.
///
/// `AdminClient` provides programmatic access to managing a Kafka cluster,
/// notably manipulating topics, partitions, and configuration parameters.
pub struct AdminClient<C: ClientContext> {
client: Client<C>,
queue: Arc<NativeQueue>,
should_stop: Arc<AtomicBool>,
handle: Option<JoinHandle<()>>,
}
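// A minimal construction sketch (an assumption, not shown in this excerpt:
// `AdminClient` is taken to implement `FromClientConfig`, so it can be built
// through `ClientConfig::create`):
//
//     let admin: AdminClient<DefaultClientContext> = ClientConfig::new()
//         .set("bootstrap.servers", "localhost:9092")
//         .create()
//         .expect("admin client creation failed");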
impl<C: ClientContext> AdminClient<C> {
/// Creates new topics according to the provided `NewTopic` specifications.
///
/// Note that while the API supports creating multiple topics at once, it
/// is not transactional. Creation of some topics may succeed while others
/// fail. Be sure to check the result of each individual operation.
pub fn create_topics<'a, I>(
&self,
topics: I,
opts: &AdminOptions,
) -> impl Future<Output = KafkaResult<Vec<TopicResult>>>
where
I: IntoIterator<Item = &'a NewTopic<'a>>,
{
match self.create_topics_inner(topics, opts) {
Ok(rx) => Either::Left(CreateTopicsFuture { rx }),
Err(err) => Either::Right(future::err(err)),
}
}
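// A hedged usage sketch for `create_topics`; the `NewTopic::new`,
// `TopicReplication::Fixed`, and `AdminOptions::new` constructors used here are
// assumptions, as they are not defined in this excerpt:
//
//     let topic = NewTopic::new("example-topic", 3, TopicReplication::Fixed(1));
//     let results = admin
//         .create_topics(&[topic], &AdminOptions::new())
//         .await?;
//     for result in results {
//         // Each entry is a per-topic result; check them individually.
//         println!("topic creation result: {:?}", result);
//     }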
fn create_topics_inner<'a, I>(
&self,
topics: I,
opts: &AdminOptions,
) -> KafkaResult<oneshot::Receiver<NativeEvent>>
where
I: IntoIterator<Item = &'a NewTopic<'a>>,
{
let mut native_topics = Vec::new();
let mut err_buf = ErrBuf::new();
for t in topics {
native_topics.push(t.to_native(&mut err_buf)?);
}
let (native_opts, rx) = opts.to_native(self.client.native_ptr(), &mut err_buf)?;
unsafe {
rdsys::rd_kafka_CreateTopics(
self.client.native_ptr(),
native_topics.as_c_array(),
native_topics.len(),
native_opts.ptr(),
self.queue.ptr(),
);
}
Ok(rx)
}
/// Deletes the named topics.
///
/// Note that while the API supports deleting multiple topics at once, it is
/// not transactional. Deletion of some topics may succeed while others
/// fail. Be sure to check the result of each individual operation.
pub fn delete_topics(
&self,
topic_names: &[&str],
opts: &AdminOptions,
) -> impl Future<Output = KafkaResult<Vec<TopicResult>>> {
match self.delete_topics_inner(topic_names, opts) {
Ok(rx) => Either::Left(DeleteTopicsFuture { rx }),
Err(err) => Either::Right(future::err(err)),
}
}
fn delete_topics_inner(
&self,
topic_names: &[&str],
opts: &AdminOptions,
) -> KafkaResult<oneshot::Receiver<NativeEvent>> {
let mut native_topics = Vec::new();
let mut err_buf = ErrBuf::new();
for tn in topic_names {
let tn_c = CString::new(*tn)?;
let native_topic = unsafe {
NativeDeleteTopic::from_ptr(rdsys::rd_kafka_DeleteTopic_new(tn_c.as_ptr())).unwrap()
};
native_topics.push(native_topic);
}
let (native_opts, rx) = opts.to_native(self.client.native_ptr(), &mut err_buf)?;
unsafe {
rdsys::rd_kafka_DeleteTopics(
self.client.native_ptr(),
native_topics.as_c_array(),
native_topics.len(),
native_opts.ptr(),
self.queue.ptr(),
);
}
Ok(rx)
}
/// Adds additional partitions to existing topics according to the provided
/// `NewPartitions` specifications.
///
/// Note that while the API supports creating partitions for multiple topics
/// at once, it is not transactional. Creation of partitions for some topics
/// may succeed while others fail. Be sure to check the result of each
/// individual operation.
pub fn create_partitions<'a, I>(
&self,
partitions: I,
opts: &AdminOptions,
) -> impl Future<Output = KafkaResult<Vec<TopicResult>>>
where
I: IntoIterator<Item = &'a NewPartitions<'a>>,
{
match self.create_partitions_inner(partitions, opts) {
Ok(rx) => Either::Left(CreatePartitionsFuture { rx }),
Err(err) => Either::Right(future::err(err)),
}
}
fn create_partitions_inner<'a, I>(
&self,
partitions: I,
opts: &AdminOptions,
) -> KafkaResult<oneshot::Receiver<NativeEvent>>
where
I: IntoIterator<Item = &'a NewPartitions<'a>>,
{
let mut native_partitions = Vec::new();
let mut err_buf = ErrBuf::new();
for p in partitions {
native_partitions.push(p.to_native(&mut err_buf)?);
}
let (native_opts, rx) = opts.to_native(self.client.native_ptr(), &mut err_buf)?;
unsafe {
rdsys::rd_kafka_CreatePartitions(
self.client.native_ptr(),
native_partitions.as_c_array(),
native_partitions.len(),
native_opts.ptr(),
self.queue.ptr(),
);
}
Ok(rx)
}
/// Retrieves the configuration parameters for the specified resources.
///
/// Note that while the API supports describing multiple configurations at
/// once, it is not transactional. There is no guarantee that you will see
/// a consistent snapshot of the configuration across different resources.
pub fn describe_configs<'a, I>(
&self,
configs: I,
opts: &AdminOptions,
) -> impl Future<Output = KafkaResult<Vec<ConfigResourceResult>>>
where
I: IntoIterator<Item = &'a ResourceSpecifier<'a>>,
{
match self.describe_configs_inner(configs, opts) {
Ok(rx) => Either::Left(DescribeConfigsFuture { rx }),
Err(err) => Either::Right(future::err(err)),
}
}
fn describe_configs_inner<'a, I>(
&self,
configs: I,
opts: &AdminOptions,
) -> KafkaResult<oneshot::Receiver<NativeEvent>>
where
I: IntoIterator<Item = &'a ResourceSpecifier<'a>>,
{
let mut native_configs = Vec::new();
let mut err_buf = ErrBuf::new();
for c in configs {
let (name, typ) = match c {
ResourceSpecifier::Topic(name) => (
CString::new(*name)?,
RDKafkaResourceType::RD_KAFKA_RESOURCE_TOPIC,
),
ResourceSpecifier::Group(name) => (
CString::new(*name)?,
RDKafkaResourceType::RD_KAFKA_RESOURCE_GROUP,
),
ResourceSpecifier::Broker(id) => (
CString::new(format!("{}", id))?,
RDKafkaResourceType::RD_KAFKA_RESOURCE_BROKER,
),
};
native_configs.push(unsafe {
NativeConfigResource::from_ptr(rdsys::rd_kafka_ConfigResource_new(
typ,
name.as_ptr(),
))
.unwrap()
});
}
let (native_opts, rx) = opts.to_native(self.client.native_ptr(), &mut err_buf)?;
unsafe {
rdsys::rd_kafka_DescribeConfigs(
self.client.native_ptr(),
native_configs.as_c_array(),
native_configs.len(),
native_opts.ptr(),
self.queue.ptr(),
);
}
Ok(rx)
}
/// Sets configuration parameters for the specified resources.
///
/// Note that while the API supports altering multiple resources at once, it
/// is not transactional. Alteration of some resources may succeed while
/// others fail. Be sure to check the result of each individual operation.
pub fn alter_configs<'a, I>(
&self,
configs: I,
opts: &AdminOptions,
) -> impl Future<Output = KafkaResult<Vec<AlterConfigsResult>>>
where
I: IntoIterator<Item = &'a AlterConfig<'a>>,
{
match self.alter_configs_inner(configs, opts) {
Ok(rx) => Either::Left(AlterConfigsFuture { rx }),
Err(err) => Either::Right(future::err(err)),
}
}
fn alter_configs_inner<'a, I>(
&self,
configs: I,
opts: &AdminOptions,
) -> KafkaResult<oneshot::Receiver<NativeEvent>>
where
I: IntoIterator<Item = &'a AlterConfig<'a>>,
{
let mut native_configs = Vec::new();
let mut err_buf = ErrBuf::new();
for c in configs {
native_configs.push(c.to_native(&mut err_buf)?);
}
let (native_opts, rx) = opts.to_native(self.client.native_ptr(), &mut err_buf)?;
unsafe {
rdsys::rd_kafka_AlterConfigs(
self.client.native_ptr(),
native_configs.as_c_array(),
native_configs.len(),
native_opts.ptr(),
self.queue.ptr(),
);
}
Ok(rx)
}
/// Returns the client underlying this admin client.
pub fn inner(&self) -> &Client<C> {
&self.client
}
}
impl FromClientConfig for AdminClient<DefaultClientContext> {
fn from_config(config: &ClientConfig) -> KafkaResult<AdminClient<DefaultClientContext>> {
AdminClient::from_config_and_context(config, DefaultClientContext)
}
}
impl<C: ClientContext> FromClientConfigAndContext<C> for AdminClient<C> {
fn from_config_and_context(config: &ClientConfig, context: C) -> KafkaResult<AdminClient<C>> {
let native_config = config.create_native_config()?;
// librdkafka only provides consumer and producer types. We follow the
// example of the Python bindings in choosing to pretend to be a
// producer, as producer clients are allegedly more lightweight. [0]
//
// [0]: https://github.com/confluentinc/confluent-kafka-python/blob/bfb07dfbca47c256c840aaace83d3fe26c587360/confluent_kafka/src/Admin.c#L1492-L1493
let client = Client::new(
config,
native_config,
RDKafkaType::RD_KAFKA_PRODUCER,
context,
)?;
let queue = Arc::new(client.new_native_queue());
let should_stop = Arc::new(AtomicBool::new(false));
let handle = start_poll_thread(queue.clone(), should_stop.clone());
Ok(AdminClient {
client,
queue,
should_stop,
handle: Some(handle),
})
}
}
impl<C: ClientContext> Drop for AdminClient<C> {
fn drop(&mut self) {
trace!("Stopping polling");
self.should_stop.store(true, Ordering::Relaxed);
trace!("Waiting for polling thread termination");
match self.handle.take().unwrap().join() {
Ok(()) => trace!("Polling stopped"),
Err(e) => warn!("Failure while terminating thread: {:?}", e),
};
}
}
fn start_poll_thread(queue: Arc<NativeQueue>, should_stop: Arc<AtomicBool>) -> JoinHandle<()> {
thread::Builder::new()
.name("admin client polling thread".into())
.spawn(move || {
trace!("Admin polling thread loop started");
loop {
let event = queue.poll(Duration::from_millis(100));
if event.is_null() {
if should_stop.load(Ordering::Relaxed) {
// We received nothing and the thread should stop, so
// break the loop.
break;
}
continue;
}
let event = unsafe { NativeEvent::from_ptr(event).unwrap() };
let tx: Box<oneshot::Sender<NativeEvent>> =
unsafe { IntoOpaque::from_ptr(rdsys::rd_kafka_event_opaque(event.ptr())) };
let _ = tx.send(event);
}
trace!("Admin polling thread loop terminated");
})
.expect("Failed to start polling thread")
}
type NativeEvent = NativePtr<RDKafkaEvent>;
unsafe impl KafkaDrop for RDKafkaEvent {
const TYPE: &'static str = "event";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_event_destroy;
}
unsafe impl Send for NativeEvent {}
unsafe impl Sync for NativeEvent {}
impl NativePtr<RDKafkaEvent> {
fn check_error(&self) -> KafkaResult<()> {
let err = unsafe { rdsys::rd_kafka_event_error(self.ptr()) };
if err.is_error() {
Err(KafkaError::AdminOp(err.into()))
} else {
Ok(())
}
}
}
//
// ********** ADMIN OPTIONS **********
//
/// Options for an admin API request.
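///
/// Options are set builder-style; a minimal sketch (illustrative timeout
/// values):
///
/// ```
/// use std::time::Duration;
/// use rdkafka::admin::AdminOptions;
///
/// let opts = AdminOptions::new()
///     .request_timeout(Some(Duration::from_secs(5)))
///     .operation_timeout(Some(Duration::from_secs(30)));
/// ```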
#[derive(Default)]
pub struct AdminOptions {
request_timeout: Option<Timeout>,
operation_timeout: Option<Timeout>,
validate_only: bool,
broker_id: Option<i32>,
}
impl AdminOptions {
/// Creates a new `AdminOptions`.
pub fn new() -> AdminOptions {
AdminOptions::default()
}
/// Sets the overall request timeout, including broker lookup, request
/// transmission, operation time on broker, and response.
///
/// Defaults to the `socket.timeout.ms` configuration parameter.
pub fn request_timeout<T: Into<Timeout>>(mut self, timeout: Option<T>) -> Self {
self.request_timeout = timeout.map(Into::into);
self
}
/// Sets the broker's operation timeout, such as the timeout for
/// CreateTopics to complete the creation of topics on the controller before
/// returning a result to the application.
///
/// If unset (the default), the API calls will return immediately after
/// triggering the operation.
///
/// Only the CreateTopics, DeleteTopics, and CreatePartitions API calls
/// respect this option.
pub fn operation_timeout<T: Into<Timeout>>(mut self, timeout: Option<T>) -> Self {
self.operation_timeout = timeout.map(Into::into);
self
}
/// Tells the broker to only validate the request, without performing the
/// requested operation.
///
/// Defaults to false.
pub fn validate_only(mut self, validate_only: bool) -> Self {
self.validate_only = validate_only;
self
}
/// Override what broker the admin request will be sent to.
///
/// By default, a reasonable broker will be selected automatically. See the
/// librdkafka docs on `rd_kafka_AdminOptions_set_broker` for details.
pub fn
|
<T: Into<Option<i32>>>(mut self, broker_id: T) -> Self {
self.broker_id = broker_id.into();
self
}
fn to_native(
&self,
client: *mut RDKafka,
err_buf: &mut ErrBuf,
) -> KafkaResult<(NativeAdminOptions, oneshot::Receiver<NativeEvent>)> {
let native_opts = unsafe {
NativeAdminOptions::from_ptr(rdsys::rd_kafka_AdminOptions_new(
client,
RDKafkaAdminOp::RD_KAFKA_ADMIN_OP_ANY,
))
.unwrap()
};
if let Some(timeout) = self.request_timeout {
let res = unsafe {
rdsys::rd_kafka_AdminOptions_set_request_timeout(
native_opts.ptr(),
timeout.as_millis(),
err_buf.as_mut_ptr(),
err_buf.capacity(),
)
};
check_rdkafka_invalid_arg(res, err_buf)?;
}
if let Some(timeout) = self.operation_timeout {
let res = unsafe {
rdsys::rd_kafka_AdminOptions_set_operation_timeout(
native_opts.ptr(),
timeout.as_millis(),
err_buf.as_mut_ptr(),
err_buf.capacity(),
)
};
check_rdkafka_invalid_arg(res, err_buf)?;
}
if self.validate_only {
let res = unsafe {
rdsys::rd_kafka_AdminOptions_set_validate_only(
native_opts.ptr(),
1, // true
err_buf.as_mut_ptr(),
err_buf.capacity(),
)
};
check_rdkafka_invalid_arg(res, err_buf)?;
}
if let Some(broker_id) = self.broker_id {
let res = unsafe {
rdsys::rd_kafka_AdminOptions_set_broker(
native_opts.ptr(),
broker_id,
err_buf.as_mut_ptr(),
err_buf.capacity(),
)
};
check_rdkafka_invalid_arg(res, err_buf)?;
}
let (tx, rx) = oneshot::channel();
let tx = Box::into_raw(Box::new(tx)) as *mut c_void;
unsafe { rdsys::rd_kafka_AdminOptions_set_opaque(native_opts.ptr(), tx) };
Ok((native_opts, rx))
}
}
unsafe impl KafkaDrop for RDKafkaAdminOptions {
const TYPE: &'static str = "admin options";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_AdminOptions_destroy;
}
type NativeAdminOptions = NativePtr<RDKafkaAdminOptions>;
fn check_rdkafka_invalid_arg(res: RDKafkaRespErr, err_buf: &ErrBuf) -> KafkaResult<()> {
match res.into() {
RDKafkaErrorCode::NoError => Ok(()),
RDKafkaErrorCode::InvalidArgument => {
let msg = if err_buf.len() == 0 {
"invalid argument".into()
} else {
err_buf.to_string()
};
Err(KafkaError::AdminOpCreation(msg))
}
res => Err(KafkaError::AdminOpCreation(format!(
"setting admin options returned unexpected error code {}",
res
))),
}
}
//
// ********** RESPONSE HANDLING **********
//
/// The result of an individual CreateTopic, DeleteTopic, or
/// CreatePartition operation.
pub type TopicResult = Result<String, (String, RDKafkaErrorCode)>;
fn build_topic_results(topics: *const *const RDKafkaTopicResult, n: usize) -> Vec<TopicResult> {
let mut out = Vec::with_capacity(n);
for i in 0..n {
let topic = unsafe { *topics.add(i) };
let name = unsafe { cstr_to_owned(rdsys::rd_kafka_topic_result_name(topic)) };
let err = unsafe { rdsys::rd_kafka_topic_result_error(topic) };
if err.is_error() {
out.push(Err((name, err.into())));
} else {
out.push(Ok(name));
}
}
out
}
//
// Create topic handling
//
/// Configuration for a CreateTopic operation.
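///
/// A minimal sketch (illustrative topic name and settings):
///
/// ```
/// use rdkafka::admin::{NewTopic, TopicReplication};
///
/// let topic = NewTopic::new("example-topic", 3, TopicReplication::Fixed(1))
///     .set("cleanup.policy", "compact");
/// ```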
#[derive(Debug)]
pub struct NewTopic<'a> {
/// The name of the new topic.
pub name: &'a str,
/// The initial number of partitions.
pub num_partitions: i32,
/// The initial replication configuration.
pub replication: TopicReplication<'a>,
/// The initial configuration parameters for the topic.
pub config: Vec<(&'a str, &'a str)>,
}
impl<'a> NewTopic<'a> {
/// Creates a new `NewTopic`.
pub fn new(
name: &'a str,
num_partitions: i32,
replication: TopicReplication<'a>,
) -> NewTopic<'a> {
NewTopic {
name,
num_partitions,
replication,
config: Vec::new(),
}
}
/// Sets a new parameter in the initial topic configuration.
pub fn set(mut self, key: &'a str, value: &'a str) -> NewTopic<'a> {
self.config.push((key, value));
self
}
fn to_native(&self, err_buf: &mut ErrBuf) -> KafkaResult<NativeNewTopic> {
let name = CString::new(self.name)?;
let repl = match self.replication {
TopicReplication::Fixed(n) => n,
TopicReplication::Variable(partitions) => {
if partitions.len() as i32 != self.num_partitions {
return Err(KafkaError::AdminOpCreation(format!(
"replication configuration for topic '{}' assigns {} partition(s), \
which does not match the specified number of partitions ({})",
self.name,
partitions.len(),
self.num_partitions,
)));
}
-1
}
};
// N.B.: we wrap topic immediately, so that it is destroyed via the
// NativeNewTopic's Drop implementation if replica assignment or config
// installation fails.
let topic = unsafe {
NativeNewTopic::from_ptr(rdsys::rd_kafka_NewTopic_new(
name.as_ptr(),
self.num_partitions,
repl,
err_buf.as_mut_ptr(),
err_buf.capacity(),
))
}
.ok_or_else(|| KafkaError::AdminOpCreation(err_buf.to_string()))?;
if let TopicReplication::Variable(assignment) = self.replication {
for (partition_id, broker_ids) in assignment.iter().enumerate() {
let res = unsafe {
rdsys::rd_kafka_NewTopic_set_replica_assignment(
topic.ptr(),
partition_id as i32,
broker_ids.as_ptr() as *mut i32,
broker_ids.len(),
err_buf.as_mut_ptr(),
err_buf.capacity(),
)
};
check_rdkafka_invalid_arg(res, err_buf)?;
}
}
for (key, val) in &self.config {
let key_c = CString::new(*key)?;
let val_c = CString::new(*val)?;
let res = unsafe {
rdsys::rd_kafka_NewTopic_set_config(topic.ptr(), key_c.as_ptr(), val_c.as_ptr())
};
check_rdkafka_invalid_arg(res, err_buf)?;
}
Ok(topic)
}
}
/// An assignment of partitions to replicas.
///
/// Each element in the outer slice corresponds to the partition with that
/// index. The inner slice specifies the broker IDs to which replicas of that
/// partition should be assigned.
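///
/// For example (illustrative broker IDs), the following assigns partition 0 to
/// brokers 1 and 2, and partition 1 to brokers 2 and 3:
///
/// ```
/// use rdkafka::admin::PartitionAssignment;
///
/// let assignment: PartitionAssignment = &[&[1, 2], &[2, 3]];
/// ```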
pub type PartitionAssignment<'a> = &'a [&'a [i32]];
/// Replication configuration for a new topic.
#[derive(Debug)]
pub enum TopicReplication<'a> {
/// All partitions should use the same fixed replication factor.
Fixed(i32),
/// Each partition should use the replica assignment from
/// `PartitionAssignment`.
Variable(PartitionAssignment<'a>),
}
type NativeNewTopic = NativePtr<RDKafkaNewTopic>;
unsafe impl KafkaDrop for RDKafkaNewTopic {
const TYPE: &'static str = "new topic";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_NewTopic_destroy;
}
struct CreateTopicsFuture {
rx: oneshot::Receiver<NativeEvent>,
}
impl Future for CreateTopicsFuture {
type Output = KafkaResult<Vec<TopicResult>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let event = ready!(self.rx.poll_unpin(cx)).map_err(|_| KafkaError::Canceled)?;
event.check_error()?;
let res = unsafe { rdsys::rd_kafka_event_CreateTopics_result(event.ptr()) };
if res.is_null() {
let typ = unsafe { rdsys::rd_kafka_event_type(event.ptr()) };
return Poll::Ready(Err(KafkaError::AdminOpCreation(format!(
"create topics request received response of incorrect type ({})",
typ
))));
}
let mut n = 0;
let topics = unsafe { rdsys::rd_kafka_CreateTopics_result_topics(res, &mut n) };
Poll::Ready(Ok(build_topic_results(topics, n)))
}
}
//
// Delete topic handling
//
type NativeDeleteTopic = NativePtr<RDKafkaDeleteTopic>;
unsafe impl KafkaDrop for RDKafkaDeleteTopic {
const TYPE: &'static str = "delete topic";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_DeleteTopic_destroy;
}
struct DeleteTopicsFuture {
rx: oneshot::Receiver<NativeEvent>,
}
impl Future for DeleteTopicsFuture {
type Output = KafkaResult<Vec<TopicResult>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let event = ready!(self.rx.poll_unpin(cx)).map_err(|_| KafkaError::Canceled)?;
event.check_error()?;
let res = unsafe { rdsys::rd_kafka_event_DeleteTopics_result(event.ptr()) };
if res.is_null() {
let typ = unsafe { rdsys::rd_kafka_event_type(event.ptr()) };
return Poll::Ready(Err(KafkaError::AdminOpCreation(format!(
"delete topics request received response of incorrect type ({})",
typ
))));
}
let mut n = 0;
let topics = unsafe { rdsys::rd_kafka_DeleteTopics_result_topics(res, &mut n) };
Poll::Ready(Ok(build_topic_results(topics, n)))
}
}
//
// Create partitions handling
//
/// Configuration for a CreatePartitions operation.
pub struct NewPartitions<'a> {
/// The name of the topic to which partitions should be added.
pub topic_name: &'a str,
/// The total number of partitions after the operation completes.
pub new_partition_count: usize,
/// The replica assignments for the new partitions.
pub assignment: Option<PartitionAssignment<'a>>,
}
impl<'a> NewPartitions<'a> {
/// Creates a new `NewPartitions`.
pub fn new(topic_name: &'a str, new_partition_count: usize) -> NewPartitions<'a> {
NewPartitions {
topic_name,
new_partition_count,
assignment: None,
}
}
/// Sets the partition replica assignment for the new partitions. Only
/// assignments for newly created replicas should be included.
pub fn assign(mut self, assignment: PartitionAssignment<'a>) -> NewPartitions<'_> {
self.assignment = Some(assignment);
self
}
fn to_native(&self, err_buf: &mut ErrBuf) -> KafkaResult<NativeNewPartitions> {
let name = CString::new(self.topic_name)?;
if let Some(assignment) = self.assignment {
// If assignment contains more than self.new_partition_count
// entries, we'll trip an assertion in librdkafka that crashes the
// process. Note that this check isn't a guarantee that the
// partition assignment is valid, since the assignment should only
// contain entries for the *new* partitions added, and not any
// existing partitions, but we can let the server handle that
// validation--we just need to make sure not to crash librdkafka.
if assignment.len() > self.new_partition_count {
return Err(KafkaError::AdminOpCreation(format!(
"partition assignment for topic '{}' assigns {} partition(s), \
which is more than the requested total number of partitions ({})",
self.topic_name,
assignment.len(),
self.new_partition_count,
)));
}
}
// N.B.: we wrap partition immediately, so that it is destroyed via
// NativeNewPartitions's Drop implementation if replica assignment or
// config installation fails.
let partitions = unsafe {
NativeNewPartitions::from_ptr(rdsys::rd_kafka_NewPartitions_new(
name.as_ptr(),
self.new_partition_count,
err_buf.as_mut_ptr(),
err_buf.capacity(),
))
}
.ok_or_else(|| KafkaError::AdminOpCreation(err_buf.to_string()))?;
if let Some(assignment) = self.assignment {
for (partition_id, broker_ids) in assignment.iter().enumerate() {
let res = unsafe {
rdsys::rd_kafka_NewPartitions_set_replica_assignment(
partitions.ptr(),
partition_id as i32,
broker_ids.as_ptr() as *mut i32,
broker_ids.len(),
err_buf.as_mut_ptr(),
err_buf.capacity(),
)
};
check_rdkafka_invalid_arg(res, err_buf)?;
}
}
Ok(partitions)
}
}
type NativeNewPartitions = NativePtr<RDKafkaNewPartitions>;
unsafe impl KafkaDrop for RDKafkaNewPartitions {
const TYPE: &'static str = "new partitions";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_NewPartitions_destroy;
}
struct CreatePartitionsFuture {
rx: oneshot::Receiver<NativeEvent>,
}
impl Future for CreatePartitionsFuture {
type Output = KafkaResult<Vec<TopicResult>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let event = ready!(self.rx.poll_unpin(cx)).map_err(|_| KafkaError::Canceled)?;
event.check_error()?;
let res = unsafe { rdsys::rd_kafka_event_CreatePartitions_result(event.ptr()) };
if res.is_null() {
let typ = unsafe { rdsys::rd_kafka_event_type(event.ptr()) };
return Poll::Ready(Err(KafkaError::AdminOpCreation(format!(
"create partitions request received response of incorrect type ({})",
typ
))));
}
let mut n = 0;
let topics = unsafe { rdsys::rd_kafka_CreatePartitions_result_topics(res, &mut n) };
Poll::Ready(Ok(build_topic_results(topics, n)))
}
}
//
// Describe configs handling
//
/// The result of an individual DescribeConfig operation.
pub type ConfigResourceResult = Result<ConfigResource, RDKafkaErrorCode>;
/// Specification of a configurable resource.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ResourceSpecifier<'a> {
/// A topic resource, identified by its name.
Topic(&'a str),
/// A group resource, identified by its ID.
Group(&'a str),
/// A broker resource, identified by its ID.
Broker(i32),
}
/// A `ResourceSpecifier` that owns its data.
#[derive(Debug, Eq, PartialEq)]
pub enum OwnedResourceSpecifier {
/// A topic resource, identified by its name.
Topic(String),
/// A group resource, identified by its ID.
Group(String),
/// A broker resource, identified by its ID.
Broker(i32),
}
/// The source of a configuration entry.
#[derive(Debug, Eq, PartialEq)]
pub enum ConfigSource {
/// Unknown. Note that Kafka brokers before v1.1.0 do not reliably provide
/// configuration source information.
Unknown,
/// A dynamic topic configuration.
DynamicTopic,
/// A dynamic broker configuration.
DynamicBroker,
/// The default dynamic broker configuration.
DynamicDefaultBroker,
/// The static broker configuration.
StaticBroker,
/// The hardcoded default configuration.
Default,
}
/// An individual configuration parameter for a `ConfigResource`.
#[derive(Debug, Eq, PartialEq)]
pub struct ConfigEntry {
/// The name of the configuration parameter.
pub name: String,
/// The value of the configuration parameter.
pub value: Option<String>,
/// The source of the configuration parameter.
pub source: ConfigSource,
/// Whether the configuration parameter is read only.
pub is_read_only: bool,
/// Whether the configuration parameter currently has the default value.
pub is_default: bool,
/// Whether the configuration parameter contains sensitive data.
pub is_sensitive: bool,
}
/// A configurable resource and its current configuration values.
#[derive(Debug)]
pub struct ConfigResource {
/// Identifies the resource.
pub specifier: OwnedResourceSpecifier,
/// The current configuration parameters.
pub entries: Vec<ConfigEntry>,
}
impl ConfigResource {
/// Builds a `HashMap` of configuration entries, keyed by configuration
/// entry name.
pub fn entry_map(&self) -> HashMap<&str, &ConfigEntry> {
self.entries.iter().map(|e| (&*e.name, e)).collect()
}
/// Searches the configuration entries to find the named parameter.
///
/// For more efficient lookups, use `entry_map` to build a `HashMap`
/// instead.
pub fn get(&self, name: &str) -> Option<&ConfigEntry> {
self.entries.iter().find(|e| e.name == name)
}
}
type NativeConfigResource = NativePtr<RDKafkaConfigResource>;
unsafe impl KafkaDrop for RDKafkaConfigResource {
const TYPE: &'static str = "config resource";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_ConfigResource_destroy;
}
fn extract_config_specifier(
resource: *const RDKafkaConfigResource,
) -> KafkaResult<OwnedResourceSpecifier> {
let typ = unsafe { rdsys::rd_kafka_ConfigResource_type(resource) };
match typ {
RDKafkaResourceType::RD_KAFKA_RESOURCE_TOPIC => {
let name = unsafe { cstr_to_owned(rdsys::rd_kafka_ConfigResource_name(resource)) };
Ok(OwnedResourceSpecifier::Topic(name))
}
RDKafkaResourceType::RD_KAFKA_RESOURCE_GROUP => {
let name = unsafe { cstr_to_owned(rdsys::rd_kafka_ConfigResource_name(resource)) };
Ok(OwnedResourceSpecifier::Group(name))
}
RDKafkaResourceType::RD_KAFKA_RESOURCE_BROKER => {
let name = unsafe { CStr::from_ptr(rdsys::rd_kafka_ConfigResource_name(resource)) }
.to_string_lossy();
match name.parse::<i32>() {
Ok(id) => Ok(OwnedResourceSpecifier::Broker(id)),
Err(_) => Err(KafkaError::AdminOpCreation(format!(
"bogus broker ID in kafka response: {}",
name
))),
}
}
_ => Err(KafkaError::AdminOpCreation(format!(
"bogus resource type in kafka response: {:?}",
typ
))),
}
}
fn extract_config_source(config_source: RDKafkaConfigSource) -> KafkaResult<ConfigSource> {
match config_source {
RDKafkaConfigSource::RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG => Ok(ConfigSource::Unknown),
RDKafkaConfigSource::RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG => {
Ok(ConfigSource::DynamicTopic)
}
RDKafkaConfigSource::RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG => {
Ok(ConfigSource::DynamicBroker)
}
RDKafkaConfigSource::RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG => {
Ok(ConfigSource::DynamicDefaultBroker)
}
RDKafkaConfigSource::RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG => {
Ok(ConfigSource::StaticBroker)
}
RDKafkaConfigSource::RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG => Ok(ConfigSource::Default),
_ => Err(KafkaError::AdminOpCreation(format!(
"bogus config source type in kafka response: {:?}",
config_source,
))),
}
}
struct DescribeConfigsFuture {
rx: oneshot::Receiver<NativeEvent>,
}
impl Future for DescribeConfigsFuture {
type Output = KafkaResult<Vec<ConfigResourceResult>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let event = ready!(self.rx.poll_unpin(cx)).map_err(|_| KafkaError::Canceled)?;
event.check_error()?;
let res = unsafe { rdsys::rd_kafka_event_DescribeConfigs_result(event.ptr()) };
if res.is_null() {
let typ = unsafe { rdsys::rd_kafka_event_type(event.ptr()) };
return Poll::Ready(Err(KafkaError::AdminOpCreation(format!(
"describe configs request received response of incorrect type ({})",
typ
))));
}
let mut n = 0;
let resources = unsafe { rdsys::rd_kafka_DescribeConfigs_result_resources(res, &mut n) };
let mut out = Vec::with_capacity(n);
for i in 0..n {
let resource = unsafe { *resources.add(i) };
let specifier = extract_config_specifier(resource)?;
let mut entries_out = Vec::new();
let mut n = 0;
let entries = unsafe { rdsys::rd_kafka_ConfigResource_configs(resource, &mut n) };
for j in 0..n {
let entry = unsafe { *entries.add(j) };
let name = unsafe { cstr_to_owned(rdsys::rd_kafka_ConfigEntry_name(entry)) };
let value = unsafe {
let value = rdsys::rd_kafka_ConfigEntry_value(entry);
if value.is_null() {
None
} else {
Some(cstr_to_owned(value))
}
};
entries_out.push(ConfigEntry {
name,
value,
source: extract_config_source(unsafe {
rdsys::rd_kafka_ConfigEntry_source(entry)
})?,
is_read_only: unsafe { rdsys::rd_kafka_ConfigEntry_is_read_only(entry) } != 0,
is_default: unsafe { rdsys::rd_kafka_ConfigEntry_is_default(entry) } != 0,
is_sensitive: unsafe { rdsys::rd_kafka_ConfigEntry_is_sensitive(entry) } != 0,
});
}
out.push(Ok(ConfigResource {
specifier,
entries: entries_out,
}))
}
Poll::Ready(Ok(out))
}
}
//
// Alter configs handling
//
/// The result of an individual AlterConfig operation.
pub type AlterConfigsResult =
Result<OwnedResourceSpecifier, (OwnedResourceSpecifier, RDKafkaErrorCode)>;
/// Configuration for an AlterConfig operation.
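///
/// A minimal sketch (illustrative topic and configuration parameter):
///
/// ```
/// use rdkafka::admin::{AlterConfig, ResourceSpecifier};
///
/// let alter = AlterConfig::new(ResourceSpecifier::Topic("example-topic"))
///     .set("retention.ms", "3600000");
/// ```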
pub struct AlterConfig<'a> {
/// Identifies the resource to be altered.
pub specifier: ResourceSpecifier<'a>,
/// The configuration parameters to be updated.
pub entries: HashMap<&'a str, &'a str>,
}
impl<'a> AlterConfig<'a> {
/// Creates a new `AlterConfig`.
pub fn new(specifier: ResourceSpecifier<'_>) -> AlterConfig<'_> {
AlterConfig {
specifier,
entries: HashMap::new(),
}
}
/// Sets the configuration parameter named `key` to the specified `value`.
pub fn set(mut self, key: &'a str, value: &'a str) -> AlterConfig<'a> {
self.entries.insert(key, value);
self
}
fn to_native(&self, err_buf: &mut ErrBuf) -> KafkaResult<NativeConfigResource> {
let (name, typ) = match self.specifier {
ResourceSpecifier::Topic(name) => (
CString::new(name)?,
RDKafkaResourceType::RD_KAFKA_RESOURCE_TOPIC,
),
ResourceSpecifier::Group(name) => (
CString::new(name)?,
RDKafkaResourceType::RD_KAFKA_RESOURCE_GROUP,
),
ResourceSpecifier::Broker(id) => (
CString::new(format!("{}", id))?,
RDKafkaResourceType::RD_KAFKA_RESOURCE_BROKER,
),
};
// N.B.: we wrap config immediately, so that it is destroyed via the
        // NativeConfigResource's Drop implementation if config installation fails.
let config = unsafe {
NativeConfigResource::from_ptr(rdsys::rd_kafka_ConfigResource_new(typ, name.as_ptr()))
.unwrap()
};
for (key, val) in &self.entries {
let key_c = CString::new(*key)?;
let val_c = CString::new(*val)?;
let res = unsafe {
rdsys::rd_kafka_ConfigResource_set_config(
config.ptr(),
key_c.as_ptr(),
val_c.as_ptr(),
)
};
check_rdkafka_invalid_arg(res, err_buf)?;
}
Ok(config)
}
}
struct AlterConfigsFuture {
rx: oneshot::Receiver<NativeEvent>,
}
impl Future for AlterConfigsFuture {
type Output = KafkaResult<Vec<AlterConfigsResult>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let event = ready!(self.rx.poll_unpin(cx)).map_err(|_| KafkaError::Canceled)?;
event.check_error()?;
let res = unsafe { rdsys::rd_kafka_event_AlterConfigs_result(event.ptr()) };
if res.is_null() {
let typ = unsafe { rdsys::rd_kafka_event_type(event.ptr()) };
return Poll::Ready(Err(KafkaError::AdminOpCreation(format!(
"alter configs request received response of incorrect type ({})",
typ
))));
}
let mut n = 0;
let resources = unsafe { rdsys::rd_kafka_AlterConfigs_result_resources(res, &mut n) };
let mut out = Vec::with_capacity(n);
for i in 0..n {
let resource = unsafe { *resources.add(i) };
let specifier = extract_config_specifier(resource)?;
out.push(Ok(specifier));
}
Poll::Ready(Ok(out))
}
}
|
broker_id
|
_lightposition.py
|
import _plotly_utils.basevalidators
class LightpositionValidator(_plotly_utils.basevalidators.CompoundValidator):
def
|
(self, plotly_name="lightposition", parent_name="volume", **kwargs):
super(LightpositionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Lightposition"),
data_docs=kwargs.pop(
"data_docs",
"""
x
Numeric vector, representing the X coordinate
for each vertex.
y
Numeric vector, representing the Y coordinate
for each vertex.
z
Numeric vector, representing the Z coordinate
for each vertex.
""",
),
**kwargs
)
|
__init__
|
view.rs
|
use std::collections::linked_list::Iter;
use std::iter::Zip;
pub fn
|
(m: Zip<Iter<String>, Iter<String>>) {
for (active_line, stable_line) in m {
active_line
.chars()
.enumerate()
.for_each(|(index, character)| {
if character == '@' {
print!("{}", "@");
} else {
print!("{}", stable_line.chars().nth(index).unwrap());
};
});
println!();
}
}
|
show
|
client.py
|
r"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|\_____________________________
| | getresponse() raises
| response = getresponse() | ConnectionError
v v
Unread-response Idle
[Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
import email.parser
import email.message
import http
import io
import re
import socket
import collections.abc
from urllib.parse import urlsplit
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "LineTooLong", "RemoteDisconnected", "error",
"responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# hack to maintain backwards compatibility
globals().update(http.HTTPStatus.__members__)
# another hack to maintain backwards compatibility
# Mapping status codes to official W3C names
responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()}
# maximal line length when calling readline().
_MAXLINE = 65536
_MAXHEADERS = 100
# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
#
# VCHAR = %x21-7E
# obs-text = %x80-FF
# header-field = field-name ":" OWS field-value OWS
# field-name = token
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text
#
# obs-fold = CRLF 1*( SP / HTAB )
# ; obsolete line folding
# ; see Section 3.2.4
# token = 1*tchar
#
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
#
# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
# the patterns for both name and value are more lenient than RFC
# definitions to allow for backwards compatibility
_is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*').fullmatch
_is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search
# These characters are not allowed within HTTP URL paths.
# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
# Prevents CVE-2019-9740. Includes control characters such as \r\n.
# We don't restrict chars above \x7f as putrequest() limits us to ASCII.
_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]')
# Arguably only these _should_ be allowed:
# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
# We are more lenient for assumed real world compatibility purposes.
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
def _encode(data, name='data'):
"""Call data.encode("latin-1") but show a better error message."""
try:
return data.encode("latin-1")
except UnicodeEncodeError as err:
raise UnicodeEncodeError(
err.encoding,
err.object,
err.start,
err.end,
"%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
"if you want to send it encoded in UTF-8." %
(name.title(), data[err.start:err.end], name)) from None
class HTTPMessage(email.message.Message):
# XXX The only usage of this method is in
# http.server.CGIHTTPRequestHandler. Maybe move the code there so
# that it doesn't need to be part of the public API. The API has
# never been defined so this could cause backwards compatibility
# issues.
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.
"""
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.keys():
if line[:n].lower() == name:
hit = 1
elif not line[:1].isspace():
hit = 0
if hit:
lst.append(line)
return lst
def parse_headers(fp, _class=HTTPMessage):
"""Parses only RFC2822 headers from a file pointer.
email Parser wants to see strings rather than bytes.
But a TextIOWrapper around self.rfile would buffer too many bytes
from the stream, bytes which we later need to read as bytes.
So we read the correct bytes here, as bytes, for email Parser
to parse.
"""
headers = []
while True:
line = fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
headers.append(line)
if len(headers) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if line in (b'\r\n', b'\n', b''):
break
hstring = b''.join(headers).decode('iso-8859-1')
return email.parser.Parser(_class=_class).parsestr(hstring)
class HTTPResponse(io.BufferedIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, method=None, url=None):
# If the response includes a content-length header, we need to
# make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. This will
# happen if a self.fp.read() is done (without a size) whether
# self.fp is buffered or not. So, no self.fp.read() by
# clients unless they know what they are doing.
self.fp = sock.makefile("rb")
self.debuglevel = debuglevel
self._method = method
# The HTTPResponse object is returned via urllib. The clients
# of http and urllib expect different attributes for the
# headers. headers is used here and supports urllib. msg is
# provided as a backwards compatibility layer for http
# clients.
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
if len(line) > _MAXLINE:
raise LineTooLong("status line")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise RemoteDisconnected("Remote end closed connection without"
" response")
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail.
version = ""
if not version.startswith("HTTP/"):
self._close_conn()
raise BadStatusLine(line)
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.headers is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.code = self.status = status
self.reason = reason.strip()
if version in ("HTTP/1.0", "HTTP/0.9"):
# Some servers might still return "0.9", treat it as 1.0 anyway
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
else:
raise UnknownProtocol(version)
self.headers = self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr, val in self.headers.items():
print("header:", hdr + ":", val)
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = True
self.chunk_left = None
else:
self.chunked = False
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.headers.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
def close(self):
try:
super().close() # set "closed" flag
finally:
if self.fp:
self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
def flush(self):
super().flush()
if self.fp:
self.fp.flush()
def readable(self):
"""Always returns True"""
return True
# End of "raw stream" methods
def isclosed(self):
"""True if the connection is closed."""
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return b""
if self._method == "HEAD":
self._close_conn()
return b""
if amt is not None:
# Amount is given, implement using readinto
b = bytearray(amt)
n = self.readinto(b)
return memoryview(b)[:n].tobytes()
else:
# Amount is not given (unbounded read) so we must check self.length
# and self.chunked
if self.chunked:
return self._readall_chunked()
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self._close_conn()
raise
self.length = 0
self._close_conn() # we read everything
return s
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b and return the number
of bytes read.
"""
if self.fp is None:
return 0
if self._method == "HEAD":
self._close_conn()
return 0
if self.chunked:
return self._readinto_chunked(b)
if self.length is not None:
if len(b) > self.length:
# clip the read to the "end of response"
b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
n = self.fp.readinto(b)
if not n and b:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self._close_conn()
elif self.length is not None:
self.length -= n
if not self.length:
self._close_conn()
return n
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
def _get_chunk_left(self):
# return self.chunk_left, reading a new chunk if necessary.
# chunk_left == 0: at the end of the current chunk, need to close it
# chunk_left == None: No current chunk, should read next.
        # This function returns a non-zero number of bytes left in the current
        # chunk, or None if the last chunk has been read.
chunk_left = self.chunk_left
if not chunk_left: # Can be 0 or None
if chunk_left is not None:
# We are at the end of chunk, discard chunk end
self._safe_read(2) # toss the CRLF at the end of the chunk
try:
chunk_left = self._read_next_chunk_size()
except ValueError:
raise IncompleteRead(b'')
if chunk_left == 0:
# last chunk: 1*("0") [ chunk-extension ] CRLF
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
chunk_left = None
self.chunk_left = chunk_left
return chunk_left
def _readall_chunked(self):
assert self.chunked != _UNKNOWN
value = []
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
break
value.append(self._safe_read(chunk_left))
self.chunk_left = 0
return b''.join(value)
except IncompleteRead:
raise IncompleteRead(b''.join(value))
def _readinto_chunked(self, b):
assert self.chunked != _UNKNOWN
total_bytes = 0
mvb = memoryview(b)
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
return total_bytes
if len(mvb) <= chunk_left:
n = self._safe_readinto(mvb)
self.chunk_left = chunk_left - n
return total_bytes + n
temp_mvb = mvb[:chunk_left]
n = self._safe_readinto(temp_mvb)
mvb = mvb[n:]
total_bytes += n
self.chunk_left = 0
except IncompleteRead:
raise IncompleteRead(bytes(b[0:total_bytes]))
def _safe_read(self, amt):
"""Read the number of bytes requested.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
data = self.fp.read(amt)
if len(data) < amt:
raise IncompleteRead(data, amt-len(data))
return data
def _safe_readinto(self, b):
"""Same as _safe_read, but for reading into a buffer."""
amt = len(b)
n = self.fp.readinto(b)
if n < amt:
raise IncompleteRead(bytes(b[:n]), amt-n)
return n
def read1(self, n=-1):
"""Read with at most one underlying system call. If at least one
byte is buffered, return that instead.
"""
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._read1_chunked(n)
if self.length is not None and (n < 0 or n > self.length):
n = self.length
result = self.fp.read1(n)
if not result and n:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def peek(self, n=-1):
# Having this enables IOBase.readline() to read more than one
# byte at a time
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._peek_chunked(n)
return self.fp.peek(n)
def readline(self, limit=-1):
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
# Fallback to IOBase readline which uses peek() and read()
return super().readline(limit)
if self.length is not None and (limit < 0 or limit > self.length):
limit = self.length
result = self.fp.readline(limit)
if not result and limit:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def _read1_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
chunk_left = self._get_chunk_left()
if chunk_left is None or n == 0:
return b''
if not (0 <= n <= chunk_left):
n = chunk_left # if n is negative or larger than chunk_left
read = self.fp.read1(n)
self.chunk_left -= len(read)
if not read:
raise IncompleteRead(b"")
return read
def _peek_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
try:
chunk_left = self._get_chunk_left()
except IncompleteRead:
return b'' # peek doesn't worry about protocol
if chunk_left is None:
return b'' # eof
# peek is allowed to return more than requested. Just request the
# entire chunk, and truncate what we get.
return self.fp.peek(chunk_left)[:chunk_left]
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
'''Returns the value of the header matching *name*.
If there are multiple matching headers, the values are
combined into a single string separated by commas and spaces.
If no matching header is found, returns *default* or None if
the *default* is not specified.
If the headers are unknown, raises http.client.ResponseNotReady.
'''
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.headers is None:
raise ResponseNotReady()
return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
def __iter__(self):
return self
# For compatibility with old-style urllib responses.
def info(self):
'''Returns an instance of the class mimetools.Message containing
meta-information associated with the URL.
When the method is HTTP, these headers are those returned by
the server at the head of the retrieved HTML page (including
Content-Length and Content-Type).
When the method is FTP, a Content-Length header will be
present if (as is now usual) the server passed back a file
length in response to the FTP retrieval request. A
Content-Type header will be present if the MIME type can be
guessed.
When the method is local-file, returned headers will include
a Date representing the file's last-modified time, a
Content-Length giving file size, and a Content-Type
containing a guess at the file's type. See also the
description of the mimetools module.
'''
return self.headers
def geturl(self):
'''Return the real URL of the page.
In some cases, the HTTP server redirects a client to another
URL. The urlopen() function handles this transparently, but in
some cases the caller needs to know which URL the client was
redirected to. The geturl() method can be used to get at this
redirected URL.
'''
return self.url
def getcode(self):
'''Return the HTTP status code that was sent with the response,
or None if the URL is not an HTTP URL.
'''
return self.status
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
@staticmethod
def _is_textIO(stream):
|
@staticmethod
def _get_content_length(body, method):
"""Get the content-length based on the body.
If the body is None, we set Content-Length: 0 for methods that expect
a body (RFC 7230, Section 3.3.2). We also set the Content-Length for
any method if the body is a str or bytes-like object and not a file.
"""
if body is None:
# do an explicit check for not None here to distinguish
# between unset and set but empty
if method.upper() in _METHODS_EXPECTING_BODY:
return 0
else:
return None
if hasattr(body, 'read'):
# file-like object.
return None
try:
# does it implement the buffer protocol (bytes, bytearray, array)?
mv = memoryview(body)
return mv.nbytes
except TypeError:
pass
if isinstance(body, str):
return len(body)
return None
def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, blocksize=8192):
self.timeout = timeout
self.source_address = source_address
self.blocksize = blocksize
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
(self.host, self.port) = self._get_hostport(host, port)
# This is stored as an instance variable to allow unit
# tests to replace it with a suitable mockup
self._create_connection = socket.create_connection
def set_tunnel(self, host, port=None, headers=None):
"""Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP CONNECT tunneling, the host passed to the
constructor is used as a proxy server that relays all communication to
        the endpoint passed to `set_tunnel`. This is done by sending an HTTP
CONNECT request to the proxy server when the connection is established.
        This method must be called before the HTTP connection has been
established.
The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request.
"""
if self.sock:
raise RuntimeError("Can't set up tunnel for established connection")
self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return (host, port)
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
self._tunnel_port)
connect_bytes = connect_str.encode("ascii")
self.send(connect_bytes)
for header, value in self._tunnel_headers.items():
header_str = "%s: %s\r\n" % (header, value)
header_bytes = header_str.encode("latin-1")
self.send(header_bytes)
self.send(b'\r\n')
response = self.response_class(self.sock, method=self._method)
(version, code, message) = response._read_status()
if code != http.HTTPStatus.OK:
self.close()
raise OSError("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending a trailer
break
if line in (b'\r\n', b'\n', b''):
break
if self.debuglevel > 0:
print('header:', line.decode())
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = self._create_connection(
(self.host,self.port), self.timeout, self.source_address)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
self.__state = _CS_IDLE
try:
sock = self.sock
if sock:
self.sock = None
sock.close() # close it manually... there may be other refs
finally:
response = self.__response
if response:
self.__response = None
response.close()
def send(self, data):
"""Send `data' to the server.
``data`` can be a string object, a bytes object, an array object, a
file-like object that supports a .read() method, or an iterable object.
"""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print("send:", repr(data))
        if hasattr(data, "read"):
if self.debuglevel > 0:
print("sendIng a read()able")
encode = self._is_textIO(data)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
while 1:
datablock = data.read(self.blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
self.sock.sendall(datablock)
return
try:
self.sock.sendall(data)
except TypeError:
if isinstance(data, collections.abc.Iterable):
for d in data:
self.sock.sendall(d)
else:
raise TypeError("data should be a bytes-like object "
"or an iterable, got %r" % type(data))
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _read_readable(self, readable):
if self.debuglevel > 0:
print("sendIng a read()able")
encode = self._is_textIO(readable)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
while True:
datablock = readable.read(self.blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
yield datablock
def _send_output(self, message_body=None, encode_chunked=False):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend((b"", b""))
msg = b"\r\n".join(self._buffer)
del self._buffer[:]
self.send(msg)
if message_body is not None:
# create a consistent interface to message_body
if hasattr(message_body, 'read'):
# Let file-like take precedence over byte-like. This
# is needed to allow the current position of mmap'ed
# files to be taken into account.
chunks = self._read_readable(message_body)
else:
try:
# this is solely to check to see if message_body
# implements the buffer API. it /would/ be easier
# to capture if PyObject_CheckBuffer was exposed
# to Python.
memoryview(message_body)
except TypeError:
try:
chunks = iter(message_body)
except TypeError:
raise TypeError("message_body should be a bytes-like "
"object or an iterable, got %r"
% type(message_body))
else:
# the object implements the buffer interface and
# can be passed directly into socket methods
chunks = (message_body,)
for chunk in chunks:
if not chunk:
if self.debuglevel > 0:
print('Zero length chunk ignored')
continue
if encode_chunked and self._http_vsn == 11:
# chunked encoding
chunk = f'{len(chunk):X}\r\n'.encode('ascii') + chunk \
+ b'\r\n'
self.send(chunk)
if encode_chunked and self._http_vsn == 11:
# end chunked transfer
self.send(b'0\r\n\r\n')
def putrequest(self, method, url, skip_host=False,
skip_accept_encoding=False):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
        `skip_host' if True does not automatically add a 'Host:' header
        `skip_accept_encoding' if True does not automatically add an
        'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest(self.__state)
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
# Prevent CVE-2019-9740.
if match := _contains_disallowed_url_pchar_re.search(url):
raise InvalidURL(f"URL can't contain control characters. {url!r} "
f"(found at least {match.group()!r})")
request = '%s %s %s' % (method, url, self._http_vsn_str)
# Non-ASCII characters should have been eliminated earlier
self._output(request.encode('ascii'))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header. If the request is going through a proxy, use
                # the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
                    # As per RFC 2732, IPv6 addresses should be wrapped with []
                    # when used as Host header
if host.find(':') >= 0:
host_enc = b'[' + host_enc + b']'
if port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
if hasattr(header, 'encode'):
header = header.encode('ascii')
if not _is_legal_header_name(header):
raise ValueError('Invalid header name %r' % (header,))
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
values[i] = one_value.encode('latin-1')
elif isinstance(one_value, int):
values[i] = str(one_value).encode('ascii')
if _is_illegal_header_value(values[i]):
raise ValueError('Invalid header value %r' % (values[i],))
value = b'\r\n\t'.join(values)
header = header + b': ' + value
self._output(header)
def endheaders(self, message_body=None, *, encode_chunked=False):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional message_body
argument can be used to pass a message body associated with the
request.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body, encode_chunked=encode_chunked)
def request(self, method, url, body=None, headers={}, *,
encode_chunked=False):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers, encode_chunked)
def _send_request(self, method, url, body, headers, encode_chunked):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = frozenset(k.lower() for k in headers)
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
# chunked encoding will happen if HTTP/1.1 is used and either
# the caller passes encode_chunked=True or the following
# conditions hold:
# 1. content-length has not been explicitly set
# 2. the body is a file or iterable, but not a str or bytes-like
# 3. Transfer-Encoding has NOT been explicitly set by the caller
if 'content-length' not in header_names:
# only chunk body if not explicitly set for backwards
# compatibility, assuming the client code is already handling the
# chunking
if 'transfer-encoding' not in header_names:
# if content-length cannot be automatically determined, fall
# back to chunked encoding
encode_chunked = False
content_length = self._get_content_length(body, method)
if content_length is None:
if body is not None:
if self.debuglevel > 0:
print('Unable to determine size of %r' % body)
encode_chunked = True
self.putheader('Transfer-Encoding', 'chunked')
else:
self.putheader('Content-Length', str(content_length))
else:
encode_chunked = False
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
            # RFC 2616 Section 3.7.1 says that text defaults to a
            # charset of iso-8859-1.
body = _encode(body, 'body')
self.endheaders(body, encode_chunked=encode_chunked)
def getresponse(self):
"""Get the response from the server.
If the HTTPConnection is in the correct state, returns an
instance of HTTPResponse or of whatever object is returned by
the response_class variable.
If a request has not been sent or if a previous response has
        not been handled, ResponseNotReady is raised. If the HTTP
response indicates that the connection should be closed, then
it will be closed before the response is returned. When the
connection is closed, the underlying socket is closed.
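        A minimal usage sketch (the host and path are illustrative)::
            conn = HTTPConnection("www.example.com")
            conn.request("GET", "/")
            resp = conn.getresponse()
            data = resp.read()
            conn.close()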
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady(self.__state)
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
method=self._method)
else:
response = self.response_class(self.sock, method=self._method)
try:
try:
response.begin()
except ConnectionError:
self.close()
raise
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
except:
response.close()
raise
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
# XXX Should key_file and cert_file be deprecated in favour of context?
def __init__(self, host, port=None, key_file=None, cert_file=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, *, context=None,
check_hostname=None, blocksize=8192):
super(HTTPSConnection, self).__init__(host, port, timeout,
source_address,
blocksize=blocksize)
if (key_file is not None or cert_file is not None or
check_hostname is not None):
import warnings
warnings.warn("key_file, cert_file and check_hostname are "
"deprecated, use a custom context instead.",
DeprecationWarning, 2)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
context = ssl._create_default_https_context()
will_verify = context.verify_mode != ssl.CERT_NONE
if check_hostname is None:
check_hostname = context.check_hostname
if check_hostname and not will_verify:
raise ValueError("check_hostname needs a SSL context with "
"either CERT_OPTIONAL or CERT_REQUIRED")
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
if check_hostname is not None:
self._context.check_hostname = check_hostname
def connect(self):
"Connect to a host on a given (SSL) port."
super().connect()
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
__all__.append("HTTPSConnection")
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return '%s(%i bytes read%s)' % (self.__class__.__name__,
len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
class RemoteDisconnected(ConnectionResetError, BadStatusLine):
def __init__(self, *pos, **kw):
BadStatusLine.__init__(self, "")
ConnectionResetError.__init__(self, *pos, **kw)
# for backwards compatibility
error = HTTPException
|
"""Test whether a file-like object is a text or a binary stream.
"""
return isinstance(stream, io.TextIOBase)
|
resolveCollectionCompletions.ts
|
import { CompletionItemKind } from 'vscode-languageserver';
import { CompletionItem } from 'vscode-languageserver-types';
import { getConditionCompletionItems } from '../../../../suggestions/defaults/conditionItems';
import { getRoot, ISuggestionRequest } from '../../../../suggestions/suggestionManager';
import { exclusiveResult, nonExclusiveResult, EmptyCompletionResult, ICompletionResult } from '../../../tagManager';
import { getCollectionBlueprintFields, getTaxonomyCompletionItems } from './utils';
export function resolveCollectionCompletions(params: ISuggestionRequest): ICompletionResult {
let items: CompletionItem[] = [];
if (params.currentSymbol != null && params.currentSymbol.currentScope != null) {
const blueprintFields = getCollectionBlueprintFields(params.currentSymbol, params.currentSymbol.currentScope),
fieldNames = blueprintFields.map((f) => f.name),
rootLeft = getRoot(params.leftWord);
if (rootLeft === 'taxonomy') {
return exclusiveResult(getTaxonomyCompletionItems(params));
}
if (fieldNames.includes(rootLeft)) {
items = getConditionCompletionItems(params);
return exclusiveResult(items);
}
if (params.isCaretInTag && params.activeParameter == null && ['collection', '/collection'].includes(params.leftWord) == false) {
const addedNames: string[] = [];
for (let i = 0; i < blueprintFields.length; i++) {
const thisField = blueprintFields[i];
if (addedNames.includes(thisField.name) == false) {
items.push({
label: thisField.name,
detail: thisField.blueprintName,
documentation: thisField.instructionText ?? '',
kind: CompletionItemKind.Field
});
addedNames.push(thisField.name);
}
|
label: 'taxonomy',
insertText: 'taxonomy:',
kind: CompletionItemKind.Field
});
items.push({
label: 'status',
insertText: 'status:',
kind: CompletionItemKind.Field
});
if (items.length > 0) {
return nonExclusiveResult(items);
}
}
}
if (params.isPastTagPart == false && (params.leftWord == 'collection' || params.leftWord == '/collection') && params.leftChar == ':') {
for (let i = 0; i < params.project.collectionNames.length; i++) {
items.push({
label: params.project.collectionNames[i],
kind: CompletionItemKind.Field
});
}
items.push({ label: 'count', kind: CompletionItemKind.Text });
items.push({ label: 'next', kind: CompletionItemKind.Text });
items.push({ label: 'previous', kind: CompletionItemKind.Text });
items.push({ label: 'older', kind: CompletionItemKind.Text });
items.push({ label: 'newer', kind: CompletionItemKind.Text });
return {
items: items,
analyzeDefaults: false,
isExclusiveResult: false,
};
}
return EmptyCompletionResult;
}
|
}
items.push({
|
user_list_date_rule_item_operator.pb.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.22.0
// protoc v3.12.3
// source: google/ads/googleads/v4/enums/user_list_date_rule_item_operator.proto
package enums
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
_ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Enum describing possible user list date rule item operators.
type UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator int32
const (
// Not specified.
UserListDateRuleItemOperatorEnum_UNSPECIFIED UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator = 0
// Used for return value only. Represents value unknown in this version.
UserListDateRuleItemOperatorEnum_UNKNOWN UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator = 1
// Equals.
UserListDateRuleItemOperatorEnum_EQUALS UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator = 2
// Not Equals.
UserListDateRuleItemOperatorEnum_NOT_EQUALS UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator = 3
// Before.
UserListDateRuleItemOperatorEnum_BEFORE UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator = 4
// After.
UserListDateRuleItemOperatorEnum_AFTER UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator = 5
)
// Enum value maps for UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator.
var (
UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator_name = map[int32]string{
0: "UNSPECIFIED",
1: "UNKNOWN",
2: "EQUALS",
3: "NOT_EQUALS",
4: "BEFORE",
5: "AFTER",
}
UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator_value = map[string]int32{
"UNSPECIFIED": 0,
"UNKNOWN": 1,
"EQUALS": 2,
"NOT_EQUALS": 3,
"BEFORE": 4,
"AFTER": 5,
}
)
func (x UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator) Enum() *UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator {
p := new(UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator)
*p = x
return p
}
func (x UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator) Descriptor() protoreflect.EnumDescriptor {
return file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_enumTypes[0].Descriptor()
}
func (UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator) Type() protoreflect.EnumType {
return &file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_enumTypes[0]
}
func (x UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator.Descriptor instead.
func (UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator) EnumDescriptor() ([]byte, []int) {
return file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescGZIP(), []int{0, 0}
}
// Supported rule operator for date type.
type UserListDateRuleItemOperatorEnum struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *UserListDateRuleItemOperatorEnum) Reset() {
*x = UserListDateRuleItemOperatorEnum{}
if protoimpl.UnsafeEnabled
|
}
func (x *UserListDateRuleItemOperatorEnum) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UserListDateRuleItemOperatorEnum) ProtoMessage() {}
func (x *UserListDateRuleItemOperatorEnum) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UserListDateRuleItemOperatorEnum.ProtoReflect.Descriptor instead.
func (*UserListDateRuleItemOperatorEnum) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescGZIP(), []int{0}
}
var File_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto protoreflect.FileDescriptor
var file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDesc = []byte{
0x0a, 0x45, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x34, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f,
0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x72,
0x75, 0x6c, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f,
0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x34,
0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x93, 0x01, 0x0a, 0x20, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x69, 0x73,
0x74, 0x44, 0x61, 0x74, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x4f, 0x70, 0x65,
0x72, 0x61, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x75, 0x6d, 0x22, 0x6f, 0x0a, 0x1c, 0x55, 0x73, 0x65,
0x72, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x49, 0x74, 0x65,
0x6d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53,
0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x51, 0x55, 0x41, 0x4c,
0x53, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c,
0x53, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x45, 0x46, 0x4f, 0x52, 0x45, 0x10, 0x04, 0x12,
0x09, 0x0a, 0x05, 0x41, 0x46, 0x54, 0x45, 0x52, 0x10, 0x05, 0x42, 0xf6, 0x01, 0x0a, 0x21, 0x63,
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x34, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73,
0x42, 0x21, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x65, 0x52, 0x75,
0x6c, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x72,
0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f,
0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73,
0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x34, 0x2f, 0x65, 0x6e,
0x75, 0x6d, 0x73, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa,
0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x2e, 0x56, 0x34, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0xca,
0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x34, 0x5c, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0xea,
0x02, 0x21, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x47,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56, 0x34, 0x3a, 0x3a, 0x45, 0x6e,
0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescOnce sync.Once
file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescData = file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDesc
)
func file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescGZIP() []byte {
file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescOnce.Do(func() {
file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescData)
})
return file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDescData
}
var file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_goTypes = []interface{}{
(UserListDateRuleItemOperatorEnum_UserListDateRuleItemOperator)(0), // 0: google.ads.googleads.v4.enums.UserListDateRuleItemOperatorEnum.UserListDateRuleItemOperator
(*UserListDateRuleItemOperatorEnum)(nil), // 1: google.ads.googleads.v4.enums.UserListDateRuleItemOperatorEnum
}
var file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_init() }
func file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_init() {
if File_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UserListDateRuleItemOperatorEnum); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDesc,
NumEnums: 1,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_goTypes,
DependencyIndexes: file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_depIdxs,
EnumInfos: file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_enumTypes,
MessageInfos: file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_msgTypes,
}.Build()
File_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto = out.File
file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_rawDesc = nil
file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_goTypes = nil
file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_depIdxs = nil
}
|
{
mi := &file_google_ads_googleads_v4_enums_user_list_date_rule_item_operator_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
|
array.rs
|
#![cfg(min_const_generics)]
//! Parallel iterator types for [arrays] (`[T; N]`)
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! Everything in this module requires const generics, stabilized in Rust 1.51.
//!
//! [arrays]: https://doc.rust-lang.org/std/primitive.array.html
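//!
//! A minimal usage sketch (the values are illustrative; requires a toolchain
//! with const generics, i.e. Rust 1.51+):
//!
//! ```
//! use rayon::prelude::*;
//!
//! let doubled: Vec<i32> = [1, 2, 3].into_par_iter().map(|x| x * 2).collect();
//! assert_eq!(doubled, vec![2, 4, 6]);
//! ```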
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::slice::{Iter, IterMut};
use crate::vec::DrainProducer;
use std::mem::ManuallyDrop;
/// This implementation requires const generics, stabilized in Rust 1.51.
impl<'data, T: Sync + 'data, const N: usize> IntoParallelIterator for &'data [T; N] {
type Item = &'data T;
type Iter = Iter<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&[T]>::into_par_iter(self)
}
}
/// This implementation requires const generics, stabilized in Rust 1.51.
impl<'data, T: Send + 'data, const N: usize> IntoParallelIterator for &'data mut [T; N] {
type Item = &'data mut T;
type Iter = IterMut<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&mut [T]>::into_par_iter(self)
}
}
/// This implementation requires const generics, stabilized in Rust 1.51.
impl<T: Send, const N: usize> IntoParallelIterator for [T; N] {
type Item = T;
type Iter = IntoIter<T, N>;
|
}
}
/// Parallel iterator that moves out of an array.
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send, const N: usize> {
array: [T; N],
}
impl<T: Send, const N: usize> ParallelIterator for IntoIter<T, N> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(N)
}
}
impl<T: Send, const N: usize> IndexedParallelIterator for IntoIter<T, N> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
N
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
unsafe {
// Drain every item, and then the local array can just fall out of scope.
let mut array = ManuallyDrop::new(self.array);
callback.callback(DrainProducer::new(&mut *array))
}
}
}
|
fn into_par_iter(self) -> Self::Iter {
IntoIter { array: self }
|
ui.go
|
// simple ui - TODO use something more sophisticated :)
package ui
import (
"io"
"os"
)
var (
uiOut = NewStdoutUI(Verbose)
uiErr = NewStderrUI(Verbose)
ui = uiOut
)
func NewUI(verbose bool, writer io.Writer) *UI {
return &UI{
Verbose: verbose,
Writer: writer,
}
}
func NewStdoutUI(verbose bool) *UI {
return &UI{
Verbose: verbose,
Writer: os.Stdout,
}
}
func NewStderrUI(verbose bool) *UI {
return &UI{
Verbose: verbose,
Writer: os.Stderr,
}
}
type UI struct {
Verbose bool
Writer io.Writer
}
func SetVerbose(verbose bool) { ui.Verbose = verbose }
func ExitOnError(item string, errors ...error) { ui.ExitOnError(item, errors...) }
func PrintOnError(item string, errors ...error) { ui.PrintOnError(item, errors...) }
func WarnOnError(item string, errors ...error) { ui.WarnOnError(item, errors...) }
func Logo() { ui.Logo() }
func LogoNoColor() { ui.LogoNoColor() }
func NL() { ui.NL() }
func Success(message string, subMessages ...string) { ui.Success(message, subMessages...) }
func SuccessAndExit(message string, subMessages ...string) {
ui.SuccessAndExit(message, subMessages...)
}
func Warn(message string, subMessages ...string) { ui.Warn(message, subMessages...) }
func LogLine(message string) { ui.LogLine(message) }
func Debug(message string, subMessages ...string) { ui.Debug(message, subMessages...) }
func Info(message string, subMessages ...string) { ui.Info(message, subMessages...) }
func Err(err error) { ui.Err(err) }
func Errf(err string, params ...interface{}) { ui.Errf(err, params...) }
func Fail(err error) { ui.Fail(err) }
func Failf(err string, params ...interface{}) { ui.Failf(err, params...) }
func CommandOutput(output []byte, command string, params ...string) {
ui.CommandOutput(output, command, params...)
}
func Medal() { ui.Medal() }
func Completed(message string, subMessages ...string) { ui.Completed(message, subMessages...) }
func GroupCompleted(main string, sub ...string) { ui.GroupCompleted(main, sub...) }
func InfoGrid(table map[string]string) { ui.InfoGrid(table) }
func Vector(table []string) { ui.Vector(table) }
func ShellCommand(title string, commands ...string) { ui.ShellCommand(title, commands...) }
func Table(tableData TableData, writer io.Writer) { ui.Table(tableData, writer) }
func JSONTable(tableData TableData, writer io.Writer) error { return ui.JSONTable(tableData, writer) }
func
|
(a [][]string) ArrayTable { return ui.NewArrayTable(a) }
func UseStdout() { ui = uiOut }
func UseStderr() { ui = uiErr }
|
NewArrayTable
|
node.rs
|
extern crate uom;
extern crate serde;
use std::fmt;
use serde::ser::{Serialize, Serializer, SerializeStruct};
use serde::de::{self, Deserialize, Deserializer, Visitor, SeqAccess, MapAccess};
use regex::Regex;
use std::fmt::Debug;
///
/// Trait for an active expression node.
///
pub trait Cherries {
fn name(&self) -> &String;
fn value(&self) -> std::result::Result<f32, String>;
fn symbol(&self) -> String;
fn to_json(&self) -> String;
}
///
/// Expression node.
///
#[derive(Clone, Debug)]
pub struct Cherry<T: Clone + Debug> {
label: String,
value: T,
previous: Option<String>,
}
impl<T: Clone + Debug + PartialEq> PartialEq for Cherry<T> {
fn eq(&self, other: &Self) -> bool {
(self.label == other.label)
&& (self.value == other.value)
&& (self.previous == other.previous)
}
}
impl<T: Clone + Debug + Serialize> Serialize for Cherry<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer
{
let mut state = serializer.serialize_struct("Cherry", 3)?;
state.serialize_field("label", &self.label)?;
state.serialize_field("value", &self.value)?;
state.serialize_field("previous", &self.previous)?;
state.end()
}
}
#[derive(Clone, Debug)]
struct CherryVisitor<T: Clone + Debug> {
value_type: std::marker::PhantomData<T>,
}
impl<'de, T: Clone + Debug + Deserialize<'de>> CherryVisitor<T> {
fn new() -> Self {
CherryVisitor { value_type: std::marker::PhantomData }
}
}
impl<'de, T: Clone + Debug + Deserialize<'de>> serde::de::Visitor<'de> for CherryVisitor<T> {
type Value = Cherry<T>;
fn expecting(&self, _: &mut std::fmt::Formatter<'_>) -> fmt::Result {
unimplemented!()
}
fn visit_seq<V>(self, mut seq: V) -> Result<Cherry<T>, V::Error>
where
V: SeqAccess<'de>,
{
let label = seq.next_element()?
.ok_or_else(|| de::Error::invalid_length(0, &self))?;
let value = seq.next_element()?
.ok_or_else(|| de::Error::invalid_length(1, &self))?;
let previous = seq.next_element()?
.ok_or_else(|| de::Error::invalid_length(2, &self))?;
Ok(Cherry{label, value, previous})
}
fn visit_map<V>(self, mut map: V) -> Result<Cherry<T>, V::Error>
where
V: MapAccess<'de>,
{
enum Field { Label, Value, Previous };
impl<'de> Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Field, D::Error>
where
D: Deserializer<'de>,
{
struct FieldVisitor;
impl<'de> Visitor<'de> for FieldVisitor {
type Value = Field;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("`label`, `value`, or `previous`")
}
fn visit_str<E>(self, value: &str) -> Result<Field, E>
where
E: de::Error,
{
match value {
"label" => Ok(Field::Label),
"value" => Ok(Field::Value),
"previous" => Ok(Field::Previous),
_ => Err(de::Error::unknown_field(value, &["label", "value", "previous"])),
}
}
}
deserializer.deserialize_identifier(FieldVisitor)
}
}
let mut label = None;
let mut value = None;
let mut previous = None;
while let Some(key) = map.next_key()? {
match key {
Field::Label => {
if label.is_some() {
return Err(de::Error::duplicate_field("label"));
}
label = Some(map.next_value()?);
}
Field::Value => {
if value.is_some() {
return Err(de::Error::duplicate_field("value"));
}
value = Some(map.next_value()?);
}
Field::Previous => {
if previous.is_some() {
return Err(de::Error::duplicate_field("previous"));
}
previous = Some(map.next_value()?);
}
}
}
let label = label.ok_or_else(|| de::Error::missing_field("label"))?;
let value = value.ok_or_else(|| de::Error::missing_field("value"))?;
let previous = previous.ok_or_else(|| de::Error::missing_field("previous"))?;
Ok(Cherry{label, value, previous})
}
}
impl<'de, T: Clone + Debug + Deserialize<'de>> Deserialize<'de> for Cherry<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
const FIELDS: &[&str] = &["label", "value", "previous"];
let visitor: CherryVisitor<T> = CherryVisitor::new();
deserializer.deserialize_struct("Duration", FIELDS, visitor)
}
}
impl<T: Clone + Debug> Cherries for Cherry<T> {
///
    /// Returns a reference to the node name.
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
///
/// let node = Leaf::new().value(1).name("node").build();
/// assert_eq!(node.name(), &"node".to_string());
/// ```
fn name(&self) -> &String {
self.name()
}
///
    /// Returns the node value or an error string.
    ///
    /// This method tries to parse the value from its debug format string for uom support.
    /// There should be some better way (help me, please!).
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
/// extern crate uom;
/// use uom::si::{f32::*, length::meter};
///
/// let node = Leaf::new().value(1).name("node").build();
/// assert_eq!(node.value(), Ok(1.0));
/// let node = Leaf::new().value(Length::new::<meter>(2.0)).name("node").build();
/// assert_eq!(node.value(), Ok(2.0));
/// ```
fn value(&self) -> std::result::Result<f32, String> {
let re = Regex::new(r#"^(.*?) .*$"#).unwrap();
let formats = format!("{:?}", self.quantity());
match formats.parse::<f32>() {
Ok(value) => Ok(value),
Err(_) => re.captures_iter(formats.clone().as_str()).last().map_or(
Err(formats.clone()),
|x| {
x.get(1).map_or(Err(formats.clone()), |x| {
x.as_str().parse::<f32>().map_err(|_| formats)
})
},
),
}
}
///
/// Returns units symbol.
///
    /// Returns the node's quantity units symbol string (if it has a quantity) or `dimensionless`.
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
/// extern crate uom;
/// use uom::si::{f32::*, length::meter};
///
/// let node = Leaf::new().value(1).name("node").build();
/// assert_eq!(node.symbol(), "dimensionless".to_string());
/// let node = Leaf::new().value(Length::new::<meter>(2.0)).name("node").build();
/// assert_eq!(node.symbol(), "m^1".to_string());
/// ```
fn symbol(&self) -> String {
let re = Regex::new(r#".*? (.*)"#).unwrap();
let formats = format!("{:?}", self.quantity());
re.captures_iter(formats.as_str())
.last()
.map(|x| {
x.get(1)
.map(|x| x.as_str().to_string())
.unwrap_or_else(|| "dimensionless".to_string())
})
.unwrap_or_else(|| "dimensionless".to_string())
}
///
    /// Returns the expression log as a JSON string.
    ///
    /// The JSON has `label` (string), `value` (number), `unit` (string), and `subexpr` (array of objects).
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
/// extern crate uom;
/// use uom::si::{f32::*, length::meter};
///
/// let x = Leaf::new().value(1.0).name("x").build();
/// let y = Leaf::new().value(Length::new::<meter>(2.0)).name("y").build();
/// let res = x * y;
/// assert_eq!(
/// res.to_json(),
/// "{\
/// \"label\":\"(mul)\",\
/// \"value\":2,\
/// \"unit\":\"m^1\",\
/// \"subexpr\":[\
/// {\
/// \"label\":\"x\",\
/// \"value\":1,\
/// \"unit\":\"dimensionless\"\
/// },\
/// {\
/// \"label\":\"y\",\
/// \"value\":2,\
/// \"unit\":\"m^1\"\
/// }\
/// ]\
/// }".to_string()
/// );
/// ```
fn to_json(&self) -> String {
match &self.previous {
Some(prev) => {
format!(
"{{\"label\":\"{label}\",\"value\":{value},\"unit\":\"{unit}\",\"subexpr\":[{subexpr}]}}",
label = self.label,
unit = self.symbol(),
value = self.value().unwrap(),
subexpr = prev)
},
None => {
format!(
"{{\"label\":\"{label}\",\"value\":{value},\"unit\":\"{unit}\"}}",
label = self.label,
unit = self.symbol(),
value = self.value().unwrap()
)
}
}
}
}
impl<T: Clone + Debug> Cherry<T> {
///
    /// Returns a reference to the quantity held by the node.
    ///
    /// Returns the node quantity (if it has one) or the plain value (if dimensionless).
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
/// extern crate uom;
/// use uom::si::{f32::*, length::meter};
///
/// let node = Leaf::new().value(1).name("node").build();
/// assert_eq!(node.quantity(), &1);
/// let node = Leaf::new().value(Length::new::<meter>(2.0)).name("y").build();
/// assert_eq!(node.quantity(), &Length::new::<meter>(2.0));
///
/// ```
pub fn quantity(&self) -> &T {
&self.value
}
///
    /// Returns a reference to the node name.
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
///
/// let node = Leaf::new().value(1).name("node").build();
/// assert_eq!(node.name(), &"node".to_string());
/// ```
pub fn name(&self) -> &String {
&self.label
}
///
    /// Returns a renamed node (consuming self).
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
///
/// let node = Leaf::new().value(1).name("node").build();
/// assert_eq!(node.name(), &"node".to_string());
/// let node = node.labeled("renamed");
/// assert_eq!(node.name(), &"renamed".to_string());
/// ```
pub fn labeled<S: Into<String>>(self, name: S) -> Cherry<T> {
Cherry {
label: name.into(),
value: self.value,
previous: self.previous,
}
}
///
/// Maps a `Cherry<T>` to `Cherry<U>` by applying a function to a contained quantity.
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
/// extern crate uom;
/// use uom::si::{f32::*, length::meter};
///
/// let x = Leaf::new()
/// .name("x")
/// .value(Length::new::<meter>(2.1))
/// .build();
/// let res = x.map(|x| x.floor::<meter>()).labeled("floor");
/// assert_eq!(&Length::new::<meter>(2.0), res.quantity());
/// ```
pub fn map<F: FnOnce(&T) -> U, U: Clone + Debug>(&self, f: F) -> Cherry<U> {
Node::new()
.name("(map)")
.value(f(self.quantity()))
.prev(self.to_json())
.build()
}
///
/// Returns `Ok(&self)` if `predicate(self.quantity())` is true, otherwise returns `Err(&self)`.
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
/// extern crate uom;
/// use uom::si::{f32::*, length::meter};
///
/// let x = Leaf::new()
/// .name("x")
/// .value(Length::new::<meter>(2.1))
/// .build();
/// let res = x.is_satisfy_with(|x| x < &Length::new::<meter>(2.0));
/// assert_eq!(Err(&x), res);
/// ```
pub fn is_satisfy_with<Predicate: FnOnce(&T) -> bool>(
&self,
predicate: Predicate,
) -> std::result::Result<&Self, &Self> {
if predicate(&self.value) {
Ok(self)
} else {
Err(self)
}
}
///
    /// Applies `self.quantity()` to the given function `f` and returns its result.
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
/// extern crate uom;
/// use uom::si::{f32::*, length::meter};
///
/// let x = Leaf::new()
/// .name("x")
/// .value(Length::new::<meter>(2.1))
/// .build();
/// let res = x.with(|x| x < &Length::new::<meter>(2.0));
/// assert_eq!(res, false);
///
/// ```
pub fn with<U, F: FnOnce(&T) -> U>(&self, f: F) -> U {
f(&self.value)
}
}
#[derive(Debug, Default)]
pub struct Leaf<NameType, ValueType> {
label: NameType,
value: ValueType,
}
///
/// Leaf node builder.
///
impl Leaf<(), ()> {
///
    /// Makes a new leaf builder with empty fields.
///
pub fn new() -> Self {
Leaf {
label: (),
value: (),
}
}
}
impl<T: Clone + Debug> Leaf<String, T> {
///
    /// Makes a `Cherry<T>` from `self.label` and `self.value`.
///
/// # Examples
/// ```
/// extern crate cherries;
/// use cherries::node::{Leaf, Cherries};
///
/// let x = Leaf::new()
/// .name("x")
/// .value(2)
/// .build();
/// assert_eq!(x.quantity(), &2);
/// assert_eq!(x.name(), &"x".to_string());
/// ```
pub fn build(self) -> Cherry<T> {
Cherry {
label: self.label,
value: self.value,
previous: None,
}
}
}
impl<NameType, ValueType> Leaf<NameType, ValueType> {
///
/// Sets field `label`.
///
pub fn name<S: Into<String>>(self, name: S) -> Leaf<String, ValueType>
|
///
/// Sets field `value`.
///
pub fn value<T: Clone + Debug>(self, val: T) -> Leaf<NameType, T> {
Leaf {
label: self.label,
value: val,
}
}
}
#[doc(hidden)]
#[derive(Debug, Default)]
pub struct Node<NameType, ValueType, PrevType> {
label: NameType,
value: ValueType,
previous: PrevType,
}
#[doc(hidden)]
impl Node<(), (), ()> {
pub fn new() -> Self {
Node {
label: (),
value: (),
previous: (),
}
}
}
#[doc(hidden)]
impl<T: Clone + Debug> Node<String, T, String> {
pub fn build(self) -> Cherry<T> {
Cherry {
label: self.label,
value: self.value,
previous: Some(self.previous),
}
}
}
#[doc(hidden)]
impl<NameType, ValueType, PrevType> Node<NameType, ValueType, PrevType> {
pub fn name<S: Into<String>>(self, name: S) -> Node<String, ValueType, PrevType> {
Node {
label: name.into(),
value: self.value,
previous: self.previous,
}
}
pub fn value<T: Clone + Debug>(self, val: T) -> Node<NameType, T, PrevType> {
Node {
label: self.label,
value: val,
previous: self.previous,
}
}
pub fn prev<S: Into<String>>(self, prev: S) -> Node<NameType, ValueType, String> {
Node {
label: self.label,
value: self.value,
previous: prev.into(),
}
}
}
|
{
Leaf {
label: name.into(),
value: self.value,
}
}
|
tasks.rs
|
use std::{
collections::{HashMap, HashSet},
io,
path::{Path, PathBuf},
process::Command,
time::{Duration, Instant},
};
use color_eyre::eyre::{bail, eyre, Result};
use displaydoc::Display;
use itertools::Itertools;
use log::{debug, error, info, trace, warn};
use rayon::prelude::*;
use thiserror::Error;
use self::{
task::{CommandType, Task},
TaskError as E,
};
use crate::{config, env::get_env, files::remove_broken_symlink, tasks::task::TaskStatus};
pub mod completions;
pub mod defaults;
pub mod git;
pub mod link;
pub mod task;
pub mod update_self;
// TODO(gib): If there's only one task left, stream output directly to the
// console and run sync.
// TODO(gib): Use https://lib.rs/crates/indicatif for progress bars.
// TODO(gib): use tui Terminal UI lib (https://crates.io/keywords/tui) for better UI.
pub trait ResolveEnv {
    /// Expand env vars in `self` by running `env_fn()` on its component
/// strings.
///
/// # Errors
/// `resolve_env()` should return any errors returned by the `env_fn()`.
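    ///
    /// A minimal sketch of an implementation (the `RunOptions` struct and its
    /// `cmd` field are hypothetical, not part of this file):
    ///
    /// ```ignore
    /// impl ResolveEnv for RunOptions {
    ///     fn resolve_env<F>(&mut self, env_fn: F) -> Result<(), TaskError>
    ///     where
    ///         F: Fn(&str) -> Result<String, TaskError>,
    ///     {
    ///         self.cmd = env_fn(&self.cmd)?;
    ///         Ok(())
    ///     }
    /// }
    /// ```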
fn resolve_env<F>(&mut self, _env_fn: F) -> Result<(), E>
where
F: Fn(&str) -> Result<String, E>,
{
Ok(())
}
}
/// What to do with the tasks.
#[derive(Debug, Clone, Copy)]
pub enum TasksAction {
/// Run tasks.
Run,
/// Just list the matching tasks.
List,
}
/// Directory in which to find the tasks.
#[derive(Debug, Clone, Copy)]
pub enum TasksDir {
/// Normal tasks to execute.
Tasks,
/// Generation tasks (that generate your main tasks).
GenerateTasks,
}
impl TasksDir {
fn to_dir_name(self) -> String
|
}
/// Run a set of tasks specified in a subdir of the directory containing the up
/// config.
pub fn run(
config: &config::UpConfig,
tasks_dirname: TasksDir,
tasks_action: TasksAction,
) -> Result<()> {
// TODO(gib): Handle missing dir & move into config.
let mut tasks_dir = config
.up_yaml_path
.as_ref()
.ok_or(E::UnexpectedNone)?
.clone();
tasks_dir.pop();
tasks_dir.push(tasks_dirname.to_dir_name());
let env = get_env(
config.config_yaml.inherit_env.as_ref(),
config.config_yaml.env.as_ref(),
)?;
    // If on macOS, don't let the display sleep until the command exits.
#[cfg(target_os = "macos")]
Command::new("caffeinate")
.args(&["-ds", "-w", &std::process::id().to_string()])
.spawn()?;
// TODO(gib): Handle and filter by constraints.
let bootstrap_tasks = match (config.bootstrap, &config.config_yaml.bootstrap_tasks) {
(false, _) => Ok(Vec::new()),
(true, None) => Err(eyre!(
"Bootstrap flag set but no bootstrap_tasks specified in config."
)),
(true, Some(b_tasks)) => Ok(b_tasks.clone()),
}?;
let filter_tasks_set: Option<HashSet<String>> =
config.tasks.clone().map(|v| v.into_iter().collect());
debug!("Filter tasks set: {filter_tasks_set:?}");
let mut tasks: HashMap<String, task::Task> = HashMap::new();
for entry in tasks_dir.read_dir().map_err(|e| E::ReadDir {
path: tasks_dir.clone(),
source: e,
})? {
let entry = entry?;
if entry.file_type()?.is_dir() {
continue;
}
let path = entry.path();
// If file is a broken symlink.
if !path.exists() && path.symlink_metadata().is_ok() {
remove_broken_symlink(&path)?;
continue;
}
let task = task::Task::from(&path)?;
let name = &task.name;
if let Some(filter) = filter_tasks_set.as_ref() {
if !filter.contains(name) {
debug!("Not running task '{name}' as not in tasks filter {filter:?}",);
continue;
}
}
tasks.insert(name.clone(), task);
}
if matches!(tasks_action, TasksAction::Run)
&& tasks.values().any(|t| t.config.needs_sudo)
&& users::get_current_uid() != 0
{
// TODO(gib): this only lasts for 5 minutes.
debug!("Prompting for superuser privileges with 'sudo -v'");
Command::new("sudo").arg("-v").output()?;
}
debug!("Task count: {:?}", tasks.len());
trace!("Task list: {tasks:#?}");
match tasks_action {
TasksAction::List => println!("{}", tasks.keys().join("\n")),
TasksAction::Run => run_tasks(bootstrap_tasks, tasks, &env, &config.up_dir)?,
}
Ok(())
}
fn run_tasks(
bootstrap_tasks: Vec<String>,
mut tasks: HashMap<String, task::Task>,
env: &HashMap<String, String>,
up_dir: &Path,
) -> Result<()> {
let mut completed_tasks = Vec::new();
if !bootstrap_tasks.is_empty() {
for task in bootstrap_tasks {
let task = run_task(
tasks
.remove(&task)
.ok_or_else(|| eyre!("Task '{task}' was missing."))?,
env,
up_dir,
);
if let TaskStatus::Failed(e) = task.status {
bail!(e);
}
completed_tasks.push(task);
}
}
completed_tasks.extend(
tasks
.into_par_iter()
.filter(|(_, task)| task.config.auto_run.unwrap_or(true))
.map(|(_, task)| run_task(task, env, up_dir))
.collect::<Vec<Task>>(),
);
let completed_tasks_len = completed_tasks.len();
let mut tasks_passed = Vec::new();
let mut tasks_skipped = Vec::new();
let mut tasks_failed = Vec::new();
let mut tasks_incomplete = Vec::new();
for task in completed_tasks {
match task.status {
TaskStatus::Failed(_) => {
tasks_failed.push(task);
}
TaskStatus::Passed => tasks_passed.push(task),
TaskStatus::Skipped => tasks_skipped.push(task),
TaskStatus::Incomplete => tasks_incomplete.push(task),
}
}
info!(
"Ran {completed_tasks_len} tasks, {} passed, {} failed, {} skipped",
tasks_passed.len(),
tasks_failed.len(),
tasks_skipped.len()
);
if !tasks_passed.is_empty() {
info!(
"Tasks passed: {:?}",
tasks_passed.iter().map(|t| &t.name).collect::<Vec<_>>()
);
}
if !tasks_skipped.is_empty() {
info!(
"Tasks skipped: {:?}",
tasks_skipped.iter().map(|t| &t.name).collect::<Vec<_>>()
);
}
if !tasks_failed.is_empty() {
error!("One or more tasks failed, exiting.");
error!(
"Tasks failed: {:#?}",
tasks_failed.iter().map(|t| &t.name).collect::<Vec<_>>()
);
let mut tasks_failed_iter = tasks_failed.into_iter().filter_map(|t| match t.status {
TaskStatus::Failed(e) => Some(e),
_ => None,
});
let err = tasks_failed_iter.next().ok_or(E::UnexpectedNone)?;
let err = eyre!(err);
tasks_failed_iter.fold(Err(err), color_eyre::Help::error)?;
}
Ok(())
}
fn run_task(mut task: Task, env: &HashMap<String, String>, up_dir: &Path) -> Task {
let env_fn = &|s: &str| {
let out = shellexpand::full_with_context(s, dirs::home_dir, |k| {
env.get(k).ok_or_else(|| eyre!("Value not found")).map(Some)
})
.map(std::borrow::Cow::into_owned)
.map_err(|e| E::ResolveEnv {
var: e.var_name,
source: e.cause,
})?;
Ok(out)
};
let now = Instant::now();
task.run(env_fn, env, up_dir);
let elapsed_time = now.elapsed();
if elapsed_time > Duration::from_secs(60) {
warn!("Task {} took {:?}", task.name, elapsed_time);
}
task
}
#[derive(Error, Debug, Display)]
/// Errors thrown by this file.
pub enum TaskError {
/// Task '{name}' {lib} failed.
TaskError {
source: color_eyre::eyre::Error,
lib: String,
name: String,
},
/// Error walking directory '{path}':
ReadDir { path: PathBuf, source: io::Error },
/// Error reading file '{path}':
ReadFile { path: PathBuf, source: io::Error },
    /// Env lookup error, please define '{var}' in your up.yaml:
EnvLookup {
var: String,
source: color_eyre::eyre::Error,
},
    /// Command was empty.
EmptyCmd,
/// Task '{name}' had no run command.
MissingCmd { name: String },
/**
    Task '{name}' {command_type} failed. Command: {cmd:?}.{suggestion}
*/
CmdFailed {
command_type: CommandType,
name: String,
source: io::Error,
cmd: Vec<String>,
suggestion: String,
},
/// Task '{name}' {command_type} failed with exit code {code}. Command: {cmd:?}.
CmdNonZero {
command_type: CommandType,
name: String,
cmd: Vec<String>,
code: i32,
},
/// Task '{name}' {command_type} was terminated. Command: {cmd:?}.
CmdTerminated {
command_type: CommandType,
name: String,
cmd: Vec<String>,
},
/// Unexpectedly empty option found.
UnexpectedNone,
/// Invalid yaml at '{path}':
InvalidYaml {
path: PathBuf,
source: serde_yaml::Error,
},
/// Env lookup error, please define '{var}' in your up.yaml
ResolveEnv {
var: String,
source: color_eyre::eyre::Error,
},
/// Task {task} must have data.
TaskDataRequired { task: String },
/// Failed to parse the config.
DeserializeError { source: serde_yaml::Error },
}
|
{
match self {
TasksDir::Tasks => "tasks".to_owned(),
TasksDir::GenerateTasks => "generate_tasks".to_owned(),
}
}
|
api_op_DescribeConfigRuleEvaluationStatus.go
|
// Code generated by smithy-go-codegen DO NOT EDIT.
package configservice
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/configservice/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Returns status information for each of your AWS managed Config rules. The status
// includes information such as the last time AWS Config invoked the rule, the last
// time AWS Config failed to invoke the rule, and the related error for the last
// failure.
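// A minimal usage sketch from a caller's point of view (illustrative only and
// not part of this generated file; it assumes the aws-sdk-go-v2 `config`
// package for credential loading, and the rule name is made up):
//
//	cfg, _ := config.LoadDefaultConfig(context.TODO())
//	client := configservice.NewFromConfig(cfg)
//	out, err := client.DescribeConfigRuleEvaluationStatus(context.TODO(),
//		&configservice.DescribeConfigRuleEvaluationStatusInput{
//			ConfigRuleNames: []string{"my-config-rule"},
//		})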
func (c *Client) DescribeConfigRuleEvaluationStatus(ctx context.Context, params *DescribeConfigRuleEvaluationStatusInput, optFns ...func(*Options)) (*DescribeConfigRuleEvaluationStatusOutput, error) {
if params == nil {
params = &DescribeConfigRuleEvaluationStatusInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeConfigRuleEvaluationStatus", params, optFns, c.addOperationDescribeConfigRuleEvaluationStatusMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeConfigRuleEvaluationStatusOutput)
out.ResultMetadata = metadata
return out, nil
}
//
type DescribeConfigRuleEvaluationStatusInput struct {
// The name of the AWS managed Config rules for which you want status information.
// If you do not specify any names, AWS Config returns status information for all
// AWS managed Config rules that you use.
ConfigRuleNames []string
// The number of rule evaluation results that you want returned. This parameter is
// required if the rule limit for your account is more than the default of 150
// rules. For information about requesting a rule limit increase, see AWS Config
// Limits
// (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_config)
// in the AWS General Reference Guide.
Limit int32
// The nextToken string returned on a previous page that you use to get the next
// page of results in a paginated response.
NextToken *string
noSmithyDocumentSerde
}
//
type DescribeConfigRuleEvaluationStatusOutput struct {
// Status information about your AWS managed Config rules.
ConfigRulesEvaluationStatus []types.ConfigRuleEvaluationStatus
// The string that you use in a subsequent request to get the next page of results
// in a paginated response.
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDescribeConfigRuleEvaluationStatusMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeConfigRuleEvaluationStatus{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeConfigRuleEvaluationStatus{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeConfigRuleEvaluationStatus(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// DescribeConfigRuleEvaluationStatusAPIClient is a client that implements the
// DescribeConfigRuleEvaluationStatus operation.
type DescribeConfigRuleEvaluationStatusAPIClient interface {
DescribeConfigRuleEvaluationStatus(context.Context, *DescribeConfigRuleEvaluationStatusInput, ...func(*Options)) (*DescribeConfigRuleEvaluationStatusOutput, error)
}
var _ DescribeConfigRuleEvaluationStatusAPIClient = (*Client)(nil)
// DescribeConfigRuleEvaluationStatusPaginatorOptions is the paginator options for
// DescribeConfigRuleEvaluationStatus
type DescribeConfigRuleEvaluationStatusPaginatorOptions struct {
// The number of rule evaluation results that you want returned. This parameter is
// required if the rule limit for your account is more than the default of 150
// rules. For information about requesting a rule limit increase, see AWS Config
// Limits
// (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_config)
// in the AWS General Reference Guide.
Limit int32
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// DescribeConfigRuleEvaluationStatusPaginator is a paginator for
// DescribeConfigRuleEvaluationStatus
type DescribeConfigRuleEvaluationStatusPaginator struct {
options DescribeConfigRuleEvaluationStatusPaginatorOptions
client DescribeConfigRuleEvaluationStatusAPIClient
params *DescribeConfigRuleEvaluationStatusInput
nextToken *string
firstPage bool
}
// NewDescribeConfigRuleEvaluationStatusPaginator returns a new
// DescribeConfigRuleEvaluationStatusPaginator
func NewDescribeConfigRuleEvaluationStatusPaginator(client DescribeConfigRuleEvaluationStatusAPIClient, params *DescribeConfigRuleEvaluationStatusInput, optFns ...func(*DescribeConfigRuleEvaluationStatusPaginatorOptions)) *DescribeConfigRuleEvaluationStatusPaginator {
if params == nil {
params = &DescribeConfigRuleEvaluationStatusInput{}
}
options := DescribeConfigRuleEvaluationStatusPaginatorOptions{}
if params.Limit != 0 {
options.Limit = params.Limit
}
for _, fn := range optFns {
fn(&options)
}
return &DescribeConfigRuleEvaluationStatusPaginator{
options: options,
client: client,
params: params,
firstPage: true,
}
}
// HasMorePages returns a boolean indicating whether more pages are available
func (p *DescribeConfigRuleEvaluationStatusPaginator) HasMorePages() bool {
return p.firstPage || p.nextToken != nil
}
// NextPage retrieves the next DescribeConfigRuleEvaluationStatus page.
func (p *DescribeConfigRuleEvaluationStatusPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeConfigRuleEvaluationStatusOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
params.Limit = p.options.Limit
result, err := p.client.DescribeConfigRuleEvaluationStatus(ctx, ¶ms, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
func newServiceMetadataMiddleware_opDescribeConfigRuleEvaluationStatus(region string) *awsmiddleware.RegisterServiceMetadata
|
{
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "config",
OperationName: "DescribeConfigRuleEvaluationStatus",
}
}
|
|
jointSBM.py
|
import numpy as np
import scipy as sp
from numpy.linalg import inv, cholesky
from scipy.linalg import eig
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
from sklearn.metrics.cluster import adjusted_rand_score as ari
from sklearn.metrics.cluster import contingency_matrix
from sklearn.cluster import KMeans
import random
import time
from joblib import Parallel, delayed
import multiprocessing
from tqdm import tqdm
import logging
from utils.utils import *
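# update_W: recompute the shared centroid matrix W from all graphs.
# Per-graph spectral embeddings Q[i] are aggregated with the gamma_n weights
# and the per-block counts delta2_n; this is effectively a weighted
# least-squares solve for cluster centres shared across graphs.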
def update_W(X,Q,deltas_,K):
delta2_n, delta2, gamma_n = deltas_
XnQn = []
XXn = []
for i in range(len(Q)):
rt = np.sum(delta2)/np.sum(delta2_n[i])
XnQn.append(np.matmul(X[i].T,Q[i]*gamma_n[i]))
XXn.append(delta2_n[i]*gamma_n[i])
XQ = np.sum(XnQn,axis = 0)
denom = np.sum(XXn,axis = 0)
W = np.matmul(inv(denom),XQ)
return W
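# update_xni: reassign node i to the cluster whose centroid (row of W) is
# closest to its embedding q, plus an additional term derived from the
# cluster-size ratios (fac, gamman); returns a one-hot membership vector.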
def update_xni(q,W,rt,fac,gamman,K):
dif = W - q
term2 = ((q*np.sqrt(fac) + q*1./np.sqrt(rt))**2).sum(axis = 1)
dist = np.diag(np.matmul(dif,dif.T))*gamman + term2
x = onehot_vec(np.argmin(dist),K)
return x
def get_fac(Vn,V,K):
x = np.eye(K)
fac = np.nan_to_num([[(Vn - x[i,:] + x[k,:])/(V - x[i,:] + x[k,:]) for k in range(K)] for i in range(K)])
# fac[fac<0] = 0
gamman = fac.sum(axis=2)
return fac, gamman
def processLoop(Qn,W,Xn,V,deltas2_n,K):
Vn = np.diag(deltas2_n)
rt = np.sum(V)/np.sum(Vn)
fac,gamman = get_fac(Vn,V,K)
for i in range(Xn.shape[0]):
xx = Xn[i,].argmax()
Xn[i,] = update_xni(Qn[i,],W,rt,fac[xx],gamman[xx],K)
return Xn
def mcr(x,y):
cm = contingency_matrix(x,y)
return (cm.max(axis = 0).sum())*1./cm.sum()
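# get_Qn: spectral embedding of one graph. Take the K eigenpairs of the
# (possibly sparse) adjacency matrix with largest absolute eigenvalue and
# return |U * diag(D)|.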
def get_Qn(adj,K):
if sp.sparse.issparse(adj):
eig_decomp = sp.sparse.linalg.eigs(adj,K)
else:
eig_decomp = eig(adj)
args = np.argsort(-abs(eig_decomp[0]),)[:K]
D = (eig_decomp[0][args])
U = (eig_decomp[1][:,args])
return abs(np.matmul(U,np.diag(D)))
def get_counts(X):
delta2_n = np.array([np.diag(np.sum(X[i],axis=0)) for i in range(len(X))])
delta2 = np.sum(delta2_n,axis=0)
gamma_n = np.array([np.sum(inv(delta2)*delta2_n[i]) for i in range(len(X))])
return delta2_n, delta2, gamma_n
class jointSBM(object):
def __init__(self, graphs, K, \
edgelist = False,
tol = 1e-5, groundTruth = None,
init = 'kmeans++', seed = 242, **kwargs):
graphs = graphs.copy()
if (type(graphs)!=dict):
self.graphNames = ["graph"+str(g) for g in range(1,len(graphs)+1)]
graphs = dict(zip(self.graphNames,graphs))
else:
self.graphNames = [k for k in graphs.keys()]
if (groundTruth!=None):
if (type(groundTruth)==dict):
self.groundTruth = [groundTruth[g] for g in self.graphNames]
# self.groundTruth = [g for k,g in groundTruth.items()]
if np.any([g.shape[0]!=g.shape[1] for k,g in graphs.items()]):
print("Converting edgelists to sparse adjacency matrices...")
edgelist = True
if edgelist:
self.idx2node = {}
for gg in self.graphNames:
graphs[gg], self.idx2node[gg] = edgelist2sparse(graphs[gg],**kwargs)
# print("Converted edgelists to sparse adjacency matrices.")
self.graphs = [graphs[g] for g in self.graphNames]
n_graphs = len(graphs)
self.n_graphs = n_graphs
self.n_nodes_array = [self.graphs[i].shape[0] for i in range(n_graphs)]
self.total_nodes = np.sum(self.n_nodes_array)
self.K = K
self.tol = tol
self.init = init
self.seed = seed
self.data_prepared = False
def prepare_data(self):
Q = []
X = []
for i in tqdm(range(self.n_graphs)):
Qn = get_Qn(self.graphs[i],self.K)*np.sqrt(self.total_nodes*1./self.n_nodes_array[i])
Q.append(Qn)
X.append(self.initX(Qn))
self.Q = Q
self.X = X
self.deltas_ = get_counts(X)
self.data_prepared = True
def initX(self, Qn):
K = self.K
if self.init == 'kmeans++':
km = KMeans(n_clusters=K,random_state=self.seed).fit(Qn).labels_
Xn = np.vstack([onehot_vec(r,K) for r in km])
else:
random.seed(self.seed)
            Xn = np.random.multinomial(1, [1. / K] * K, size=Qn.shape[0])
return Xn
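    # fit: alternate between updating the shared centroids W and the per-graph
    # one-hot memberships X until the change in the loss falls below `tol` or
    # `maxIter` is reached; restarts with random memberships if a cluster empties.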
def fit(self, printLoss = False, maxIter = 200, parallel = False, n_cores = -1):
self.maxIter = maxIter
self.parallel = parallel
self.n_cores = n_cores
if not self.data_prepared:
self.prepare_data()
X = self.X
Q = self.Q
deltas_ = self.deltas_
K = self.K
n_graphs = self.n_graphs
n_nodes_array = self.n_nodes_array
Loss = [0]
stopValue = 1
iter = -1
measures = {}
while (stopValue > self.tol and iter < self.maxIter):
t0 = time.time()
iter = iter + 1
W = update_W(X,Q,deltas_,K)
V = np.diag(deltas_[1])
memberships = []
counts_memberships = np.zeros([1,K])
if self.parallel:
if n_cores == -1:
num_cores = multiprocessing.cpu_count()
else:
num_cores = self.n_cores
X = Parallel(n_jobs=num_cores)(delayed(processLoop)(Q[n],W,X[n],V,deltas_[0][n],K) for n in range(n_graphs))
for x in X:
# memberships.append(np.argmax(x,1))
counts_memberships += np.sum(x,0)
else:
for n in range(n_graphs):
X[n] = processLoop(Q[n],W,X[n],V,deltas_[0][n],K)
counts_memberships += np.sum(X[n],0)
if (np.sum(counts_memberships==0.)>0):
iter = 1
logging.warning("Restarting...")
X = [np.random.multinomial(1,[1./K]*K,size = n_nodes_array[i]) for i in range(n_graphs)]
memberships = [np.argmax(X[n],1) for n in range(len(X))]
deltas_ = get_counts(X)
loss = np.ndarray([n_graphs])
for n in range(n_graphs):
XW = np.matmul(X[n],W)
rt = np.sum(deltas_[1])/np.sum(deltas_[0][n])
term = np.sqrt(np.matmul(inv(deltas_[1]),deltas_[0][n])) + np.sqrt(np.eye(K)*1./rt)
loss[n] = deltas_[2][n]*frobenius_norm(XW-Q[n])**2 + frobenius_norm(np.matmul(Q[n],term))**2
t1 = time.time()
Loss.append(np.sum(loss))
if printLoss:
|
stopValue = abs(Loss[iter] - Loss[iter-1])
            if getattr(self, "groundTruth", None) is not None:
                measures[iter] = self.evaluate(memberships)
measures[iter]['Time'] = t1-t0
else:
measures[iter] = {'Time':t1-t0}
theta = estimateTheta(X,self.graphs)
order = np.argsort(-1*np.diag(theta),)
theta = theta[order,][:,order]
self.theta = theta
self.W = W[order,]
memberships = []
for n in range(n_graphs):
X[n] = X[n][:,order]
memberships.append(dict(zip(range(1,len(X[n])+1),np.argmax(X[n],1))))
memberships = dict(zip(self.graphNames,memberships))
self.memberships = memberships
self.X = X
self.measures = measures
self.iter = iter
return memberships, theta, W, measures
    def evaluate(self, memberships):
groundTruth = self.groundTruth
n_graphs = self.n_graphs
individual_nmi = np.zeros([n_graphs])
individual_ari = np.zeros([n_graphs])
individual_mcr = np.zeros([n_graphs])
for n in range(n_graphs):
# print(n)
individual_nmi[n] = nmi(memberships[n],groundTruth[n])
individual_ari[n] = ari(memberships[n],groundTruth[n])
individual_mcr[n] = mcr(memberships[n],groundTruth[n])
trueMemberships_stacked = np.reshape(np.hstack(groundTruth),[-1])
memberships_stacked = np.hstack(memberships)
overall_nmi = nmi(memberships_stacked,trueMemberships_stacked)
overall_ari = ari(memberships_stacked,trueMemberships_stacked)
overall_mcr = mcr(memberships_stacked,trueMemberships_stacked)
return {"NMI" : {'nmi' : np.mean(individual_nmi),'overall_nmi' : overall_nmi},"ARI" : {'ari' : np.mean(individual_ari),'overall_ari' : overall_ari},"MCR" : {'mcr' : np.mean(individual_mcr),'overall_mcr' : overall_mcr}}
|
print("Iter: {} | Loss: {}".format(iter, Loss[iter]))
|
function.rs
|
//! Intermediate representation for C/C++ functions and methods.
use super::comp::MethodKind;
use super::context::{BindgenContext, TypeId};
use super::dot::DotAttributes;
use super::item::Item;
use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::TypeKind;
use clang;
use clang_sys::{self, CXCallingConv};
use ir::derive::{CanTriviallyDeriveDebug, CanTriviallyDeriveHash,
CanTriviallyDerivePartialEqOrPartialOrd, CanDerive};
use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult};
use quote;
use std::io;
const RUST_DERIVE_FUNPTR_LIMIT: usize = 12;
/// What kind of a function are we looking at?
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum FunctionKind {
/// A plain, free function.
Function,
/// A method of some kind.
Method(MethodKind),
}
impl FunctionKind {
fn from_cursor(cursor: &clang::Cursor) -> Option<FunctionKind> {
// FIXME(emilio): Deduplicate logic with `ir::comp`.
Some(match cursor.kind() {
clang_sys::CXCursor_FunctionDecl => FunctionKind::Function,
clang_sys::CXCursor_Constructor => FunctionKind::Method(
MethodKind::Constructor,
),
clang_sys::CXCursor_Destructor => FunctionKind::Method(
if cursor.method_is_virtual() {
MethodKind::VirtualDestructor {
pure_virtual: cursor.method_is_pure_virtual(),
}
} else {
MethodKind::Destructor
}
),
clang_sys::CXCursor_CXXMethod => {
if cursor.method_is_virtual() {
FunctionKind::Method(MethodKind::Virtual {
pure_virtual: cursor.method_is_pure_virtual(),
})
} else if cursor.method_is_static() {
FunctionKind::Method(MethodKind::Static)
} else {
FunctionKind::Method(MethodKind::Normal)
}
}
_ => return None,
})
}
}
/// The style of linkage
#[derive(Debug, Clone, Copy)]
pub enum Linkage {
/// Externally visible and can be linked against
External,
/// Not exposed externally. 'static inline' functions will have this kind of linkage
Internal
}
/// A function declaration, with a signature, arguments, and argument names.
///
/// The argument names vector must be the same length as the ones in the
/// signature.
#[derive(Debug)]
pub struct Function {
/// The name of this function.
name: String,
/// The mangled name, that is, the symbol.
mangled_name: Option<String>,
/// The id pointing to the current function signature.
signature: TypeId,
/// The doc comment on the function, if any.
comment: Option<String>,
/// The kind of function this is.
kind: FunctionKind,
/// The linkage of the function.
linkage: Linkage,
}
impl Function {
/// Construct a new function.
pub fn new(
name: String,
mangled_name: Option<String>,
signature: TypeId,
comment: Option<String>,
kind: FunctionKind,
linkage: Linkage
) -> Self {
Function {
name,
mangled_name,
signature,
comment,
kind,
linkage,
}
}
/// Get this function's name.
pub fn name(&self) -> &str {
&self.name
}
/// Get this function's name.
pub fn mangled_name(&self) -> Option<&str> {
self.mangled_name.as_ref().map(|n| &**n)
}
/// Get this function's signature type.
pub fn signature(&self) -> TypeId {
self.signature
}
/// Get this function's kind.
pub fn kind(&self) -> FunctionKind {
self.kind
}
/// Get this function's linkage.
pub fn linkage(&self) -> Linkage {
self.linkage
}
}
impl DotAttributes for Function {
fn dot_attributes<W>(
&self,
_ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write,
{
if let Some(ref mangled) = self.mangled_name {
let mangled: String =
mangled.chars().flat_map(|c| c.escape_default()).collect();
writeln!(
out,
"<tr><td>mangled name</td><td>{}</td></tr>",
mangled
)?;
}
Ok(())
}
}
/// An ABI extracted from a clang cursor.
#[derive(Debug, Copy, Clone)]
pub enum Abi {
/// The default C ABI.
C,
/// The "stdcall" ABI.
Stdcall,
/// The "fastcall" ABI.
Fastcall,
/// The "thiscall" ABI.
ThisCall,
/// The "aapcs" ABI.
Aapcs,
/// The "win64" ABI.
Win64,
/// An unknown or invalid ABI.
Unknown(CXCallingConv),
}
impl Abi {
/// Returns whether this Abi is known or not.
fn is_unknown(&self) -> bool {
match *self {
Abi::Unknown(..) => true,
_ => false,
}
}
}
impl quote::ToTokens for Abi {
fn to_tokens(&self, tokens: &mut quote::Tokens) {
tokens.append_all(match *self {
Abi::C => quote! { "C" },
Abi::Stdcall => quote! { "stdcall" },
Abi::Fastcall => quote! { "fastcall" },
Abi::ThisCall => quote! { "thiscall" },
Abi::Aapcs => quote! { "aapcs" },
Abi::Win64 => quote! { "win64" },
Abi::Unknown(cc) => panic!(
"Cannot turn unknown calling convention to tokens: {:?}",
cc
),
});
}
}
/// A function signature.
#[derive(Debug)]
pub struct FunctionSig {
/// The return type of the function.
return_type: TypeId,
/// The type of the arguments, optionally with the name of the argument when
/// declared.
argument_types: Vec<(Option<String>, TypeId)>,
/// Whether this function is variadic.
is_variadic: bool,
/// The ABI of this function.
abi: Abi,
}
fn get_abi(cc: CXCallingConv) -> Abi {
use clang_sys::*;
match cc {
CXCallingConv_Default => Abi::C,
CXCallingConv_C => Abi::C,
CXCallingConv_X86StdCall => Abi::Stdcall,
CXCallingConv_X86FastCall => Abi::Fastcall,
CXCallingConv_X86ThisCall => Abi::ThisCall,
CXCallingConv_AAPCS => Abi::Aapcs,
CXCallingConv_X86_64Win64 => Abi::Win64,
other => Abi::Unknown(other),
}
}
/// Get the mangled name for the cursor's referent.
pub fn cursor_mangling(
ctx: &BindgenContext,
cursor: &clang::Cursor,
) -> Option<String> {
use clang_sys;
if !ctx.options().enable_mangling {
return None;
}
// We early return here because libclang may crash in some case
// if we pass in a variable inside a partial specialized template.
// See rust-lang-nursery/rust-bindgen#67, and rust-lang-nursery/rust-bindgen#462.
if cursor.is_in_non_fully_specialized_template() {
return None;
}
let is_destructor = cursor.kind() == clang_sys::CXCursor_Destructor;
if let Ok(mut manglings) = cursor.cxx_manglings() {
while let Some(m) = manglings.pop() {
// Only generate the destructor group 1, see below.
if is_destructor && !m.ends_with("D1Ev") {
continue;
}
return Some(m);
}
}
let mut mangling = cursor.mangling();
if mangling.is_empty() {
return None;
}
if is_destructor {
// With old (3.8-) libclang versions, and the Itanium ABI, clang returns
// the "destructor group 0" symbol, which means that it'll try to free
// memory, which definitely isn't what we want.
//
// Explicitly force the destructor group 1 symbol.
//
// See http://refspecs.linuxbase.org/cxxabi-1.83.html#mangling-special
// for the reference, and http://stackoverflow.com/a/6614369/1091587 for
// a more friendly explanation.
//
// We don't need to do this for constructors since clang seems to always
// have returned the C1 constructor.
//
// FIXME(emilio): Can a legit symbol in other ABIs end with this string?
// I don't think so, but if it can this would become a linker error
// anyway, not an invalid free at runtime.
//
// TODO(emilio, #611): Use cpp_demangle if this becomes nastier with
// time.
if mangling.ends_with("D0Ev") {
let new_len = mangling.len() - 4;
mangling.truncate(new_len);
mangling.push_str("D1Ev");
}
}
Some(mangling)
}
impl FunctionSig {
/// Construct a new function signature.
pub fn new(
return_type: TypeId,
arguments: Vec<(Option<String>, TypeId)>,
is_variadic: bool,
abi: Abi,
) -> Self {
FunctionSig {
return_type: return_type,
argument_types: arguments,
is_variadic: is_variadic,
abi: abi,
}
}
/// Construct a new function signature from the given Clang type.
pub fn from_ty(
ty: &clang::Type,
cursor: &clang::Cursor,
ctx: &mut BindgenContext,
) -> Result<Self, ParseError> {
use clang_sys::*;
debug!("FunctionSig::from_ty {:?} {:?}", ty, cursor);
// Skip function templates
if cursor.kind() == CXCursor_FunctionTemplate {
return Err(ParseError::Continue);
}
// Don't parse operatorxx functions in C++
let spelling = cursor.spelling();
if spelling.starts_with("operator") {
return Err(ParseError::Continue);
}
let cursor = if cursor.is_valid() {
*cursor
} else {
ty.declaration()
};
let mut args: Vec<_> = match cursor.kind() {
CXCursor_FunctionDecl |
CXCursor_Constructor |
CXCursor_CXXMethod |
CXCursor_ObjCInstanceMethodDecl |
CXCursor_ObjCClassMethodDecl => {
// For CXCursor_FunctionDecl, cursor.args() is the reliable way
// to get parameter names and types.
cursor
.args()
.unwrap()
.iter()
.map(|arg| {
let arg_ty = arg.cur_type();
let name = arg.spelling();
let name =
if name.is_empty() { None } else { Some(name) };
let ty = Item::from_ty_or_ref(arg_ty, *arg, None, ctx);
(name, ty)
})
.collect()
}
_ => {
// For non-CXCursor_FunctionDecl, visiting the cursor's children
// is the only reliable way to get parameter names.
let mut args = vec![];
cursor.visit(|c| {
if c.kind() == CXCursor_ParmDecl {
let ty =
Item::from_ty_or_ref(c.cur_type(), c, None, ctx);
let name = c.spelling();
let name =
if name.is_empty() { None } else { Some(name) };
args.push((name, ty));
}
CXChildVisit_Continue
});
args
}
};
let is_method = cursor.kind() == CXCursor_CXXMethod;
let is_constructor = cursor.kind() == CXCursor_Constructor;
let is_destructor = cursor.kind() == CXCursor_Destructor;
if (is_constructor || is_destructor || is_method) &&
cursor.lexical_parent() != cursor.semantic_parent()
{
// Only parse constructors once.
return Err(ParseError::Continue);
}
if is_method || is_constructor || is_destructor {
let is_const = is_method && cursor.method_is_const();
let is_virtual = is_method && cursor.method_is_virtual();
let is_static = is_method && cursor.method_is_static();
if !is_static && !is_virtual {
let parent = cursor.semantic_parent();
let class = Item::parse(parent, None, ctx)
.expect("Expected to parse the class");
// The `class` most likely is not finished parsing yet, so use
// the unchecked variant.
let class = class.as_type_id_unchecked();
let class = if is_const {
let const_class_id = ctx.next_item_id();
ctx.build_const_wrapper(
const_class_id,
class,
None,
&parent.cur_type(),
)
} else {
class
};
let ptr =
Item::builtin_type(TypeKind::Pointer(class), false, ctx);
args.insert(0, (Some("this".into()), ptr));
} else if is_virtual {
let void = Item::builtin_type(TypeKind::Void, false, ctx);
let ptr =
Item::builtin_type(TypeKind::Pointer(void), false, ctx);
args.insert(0, (Some("this".into()), ptr));
}
}
let ty_ret_type = if cursor.kind() == CXCursor_ObjCInstanceMethodDecl ||
cursor.kind() == CXCursor_ObjCClassMethodDecl
{
ty.ret_type().or_else(|| cursor.ret_type()).ok_or(
ParseError::Continue,
)?
} else {
ty.ret_type().ok_or(ParseError::Continue)?
};
let ret = Item::from_ty_or_ref(ty_ret_type, cursor, None, ctx);
let call_conv = ty.call_conv();
let abi = get_abi(call_conv);
if abi.is_unknown() {
warn!("Unknown calling convention: {:?}", call_conv);
}
Ok(Self::new(ret.into(), args, ty.is_variadic(), abi))
}
/// Get this function signature's return type.
pub fn return_type(&self) -> TypeId {
self.return_type
}
/// Get this function signature's argument (name, type) pairs.
pub fn argument_types(&self) -> &[(Option<String>, TypeId)] {
&self.argument_types
}
/// Get this function signature's ABI.
pub fn abi(&self) -> Abi {
self.abi
}
/// Is this function signature variadic?
pub fn is_variadic(&self) -> bool {
        // Clang reports some functions as variadic when they *might* be
        // variadic. We do the argument check because Rust does not codegen
        // variadic functions well without at least one leading argument.
self.is_variadic && !self.argument_types.is_empty()
}
/// Are function pointers with this signature able to derive Rust traits?
/// Rust only supports deriving traits for function pointers with a limited
/// number of parameters and a couple ABIs.
///
/// For more details, see:
///
/// * https://github.com/rust-lang-nursery/rust-bindgen/issues/547,
/// * https://github.com/rust-lang/rust/issues/38848,
/// * and https://github.com/rust-lang/rust/issues/40158
pub fn function_pointers_can_derive(&self) -> bool {
if self.argument_types.len() > RUST_DERIVE_FUNPTR_LIMIT {
return false;
}
match self.abi {
Abi::C | Abi::Unknown(..) => true,
_ => false,
}
}
}
|
cursor: clang::Cursor,
context: &mut BindgenContext,
) -> Result<ParseResult<Self>, ParseError> {
use clang_sys::*;
let kind = match FunctionKind::from_cursor(&cursor) {
None => return Err(ParseError::Continue),
Some(k) => k,
};
debug!("Function::parse({:?}, {:?})", cursor, cursor.cur_type());
let visibility = cursor.visibility();
if visibility != CXVisibility_Default {
return Err(ParseError::Continue);
}
if cursor.access_specifier() == CX_CXXPrivate {
return Err(ParseError::Continue);
}
if !context.options().generate_inline_functions &&
cursor.is_inlined_function()
{
return Err(ParseError::Continue);
}
let linkage = cursor.linkage();
let linkage = match linkage {
CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External,
CXLinkage_Internal => Linkage::Internal,
_ => return Err(ParseError::Continue)
};
// Grab the signature using Item::from_ty.
let sig =
Item::from_ty(&cursor.cur_type(), cursor, None, context)?;
let mut name = cursor.spelling();
assert!(!name.is_empty(), "Empty function name?");
if cursor.kind() == CXCursor_Destructor {
// Remove the leading `~`. The alternative to this is special-casing
// code-generation for destructor functions, which seems less than
// ideal.
if name.starts_with('~') {
name.remove(0);
}
// Add a suffix to avoid colliding with constructors. This would be
// technically fine (since we handle duplicated functions/methods),
// but seems easy enough to handle it here.
name.push_str("_destructor");
}
let mut mangled_name = cursor_mangling(context, &cursor);
if mangled_name.as_ref() == Some(&name) {
mangled_name = None;
}
let comment = cursor.raw_comment();
let function = Self::new(name, mangled_name, sig, comment, kind, linkage);
Ok(ParseResult::New(function, Some(cursor)))
}
}
impl Trace for FunctionSig {
type Extra = ();
fn trace<T>(&self, _: &BindgenContext, tracer: &mut T, _: &())
where
T: Tracer,
{
tracer.visit_kind(self.return_type().into(), EdgeKind::FunctionReturn);
for &(_, ty) in self.argument_types() {
tracer.visit_kind(ty.into(), EdgeKind::FunctionParameter);
}
}
}
impl CanTriviallyDeriveDebug for FunctionSig {
fn can_trivially_derive_debug(&self, _: &BindgenContext) -> bool {
self.function_pointers_can_derive()
}
}
impl CanTriviallyDeriveHash for FunctionSig {
fn can_trivially_derive_hash(&self, _: &BindgenContext) -> bool {
self.function_pointers_can_derive()
}
}
impl CanTriviallyDerivePartialEqOrPartialOrd for FunctionSig {
fn can_trivially_derive_partialeq_or_partialord(&self, _: &BindgenContext) -> CanDerive {
if self.function_pointers_can_derive() {
CanDerive::Yes
} else {
CanDerive::No
}
}
}
|
impl ClangSubItemParser for Function {
fn parse(
|
state_test.go
|
package cmd
import (
"io/ioutil"
"os"
"path"
"strconv"
"testing"
"github.com/strangelove-ventures/horcrux/signer"
"github.com/stretchr/testify/require"
)
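// Exercises the state set command: a config is initialized in a temporary
// home directory, the command is then run with valid and invalid height
// arguments, and the resulting priv-validator and share sign-state files
// are checked.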
func
|
(t *testing.T) {
tmpHome := "/tmp/TestStateSetCmd"
tmpConfig := path.Join(tmpHome, ".horcrux")
chainid := "horcrux-1"
err := os.Setenv("HOME", tmpHome)
require.NoError(t, err)
err = os.MkdirAll(tmpHome, 0777)
require.NoError(t, err)
cmd := initCmd()
cmd.SetOutput(ioutil.Discard)
cmd.SetArgs([]string{
chainid,
"tcp://10.168.0.1:1234",
"-c",
"-p", "tcp://10.168.1.2:2222|2,tcp://10.168.1.3:2222|3",
"--timeout", "1500ms",
})
err = cmd.Execute()
require.NoError(t, err)
tcs := []struct {
name string
args []string
expectErr bool
}{
{
name: "valid height",
args: []string{"123456789"},
expectErr: false,
},
{
name: "invalid height",
args: []string{"-123456789"},
expectErr: true,
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
cmd := setStateCmd()
cmd.SetOutput(ioutil.Discard)
cmd.SetArgs(tc.args)
err = cmd.Execute()
if tc.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
height, err := strconv.ParseInt(tc.args[0], 10, 64)
require.NoError(t, err)
ss, err := signer.LoadSignState(path.Join(tmpConfig, "state", chainid+"_priv_validator_state.json"))
require.NoError(t, err)
require.Equal(t, height, ss.Height)
require.Equal(t, int64(0), ss.Round)
require.Equal(t, int8(0), ss.Step)
require.Nil(t, ss.EphemeralPublic)
require.Nil(t, ss.Signature)
require.Nil(t, ss.SignBytes)
ss, err = signer.LoadSignState(path.Join(tmpConfig, "state", chainid+"_share_sign_state.json"))
require.NoError(t, err)
require.Equal(t, height, ss.Height)
require.Equal(t, int64(0), ss.Round)
require.Equal(t, int8(0), ss.Step)
require.Nil(t, ss.EphemeralPublic)
require.Nil(t, ss.Signature)
require.Nil(t, ss.SignBytes)
}
})
}
t.Cleanup(func() {
os.RemoveAll(tmpHome)
})
}
|
TestStateSetCmd
|
messages.rs
|
use crate::config::read_config_suite;
use crate::discord::create_message;
use clap::ArgMatches;
pub fn
|
(matches: &ArgMatches) {
match matches.subcommand() {
("post", Some(matches)) => {
send_message(matches);
}
_ => unreachable!(),
}
}
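// send_message reads a single line from stdin and posts it to the Discord
// channel given by the `channel_id` argument, using the token from the
// config suite.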
fn send_message(matches: &ArgMatches) {
let mut input = String::new();
let _ = std::io::stdin().read_line(&mut input);
let config = read_config_suite();
let id = matches.value_of("channel_id");
let msg = create_message(config.token, id.unwrap().into(), input);
println!("{:#?}", msg);
}
|
run
|
pprocConvOrder.py
|
#!/usr/bin/env python3
# Template post-processing script for PeleLM convergence analysis
# Must be used after the multirun.py script
# Inputs are limited by the regression framework.
# Usage:
# ./pprocConvOrder.py --pproc_exec prog.exe --test_name DummyTest
# Input:
# * --pproc_exec: the post-processing executable path
# * --test_name: a TESTNAME that will be looked for during the post-processing
# "Internal" user input
# * pproc_type:
# - pproc_type == "fcompare". fcompare is used to get the error from the initial solution (== analytical solution)
# - pproc_type == "diffsamedomain". Analytical solution is not known and errors are computed from the next finer grid
# * vars : a list of the variables of interest (no check is done on whether it exists in plt ...)
# * resolution : a list of the resolutions to post-process (should be consistent with multirun.py, if used)
# Output:
# * Convergence_${TESTNAME}.png file with the log-log plot of the error vs. resolution.
# * ConvTable_${TESTNAME}.tex file with the convergence rate formatted in a LaTeX table.
# * Convergence_${TESTNAME}.dat plain text file with the convergence rate.
# Heads-up:
# - The script will get a copy of the post-processing program (if not already there) in the testing folder. The name of this folder is assumed to be the TESTNAME.
# - The plt file naming convention is: ${TESTNAME}_plt_${resolution}_*****. It is used to get the first and last solution of a test at a given resolution.
# - Errors are parsed from the screen output of the standard fcompare/diffsamedomain. Beware of any changes to these programs.
import sys
import os
import fnmatch
import shutil
import argparse
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
USAGE = """
Template post-processing script for PeleLM convergence analysis
"""
def pproc(args):
# User data
vars=["y_velocity", "density", "temp", "Y(O2)", "Y(CH4)", "Y(CO2)", "Y(CO)", "Y(H2O)" ]
resolution = [64,128,256,512]
pproc_type = "diffsamedomain"
# Get a local copy of post-processing executable
run_dir = os.getcwd()
if ( not os.path.isfile(os.path.basename(args.pproc_exe)) ):
shutil.copy(args.pproc_exe, run_dir)
# Check the test name: current folder name is default
if ( args.test_name == "None" ):
args.test_name = run_dir.split("/")[-1]
# Run the postprocessing
if ( pproc_type == "fcompare" ): # running fcompare since analytical solution is known
errors = np.empty([len(resolution),len(vars)+1])
pltfile=[]
for res in range(len(resolution)):
case = resolution[res]
errors[res,0] = case
# Get the fcompare inputs: first and last solution of current case
# TODO: the analytical solution might not be plt****_00000 ...
for f in os.listdir(run_dir):
if ( not fnmatch.fnmatch(f, '*old*')):
if (f.startswith("{}_plt_{}_".format(args.test_name,case))):
pltfile.append(f)
pltfile.sort()
outfile = "error_{}.analysis.out".format(case)
os.system("./{} -n 2 {} {} > {}".format(os.path.basename(args.pproc_exe), pltfile[0], pltfile[-1], outfile))
pltfile.clear()
# Extract errors on each variable
with open(outfile) as fp:
for i, line in enumerate(fp):
if (i >= 5):
var = line.split()[0]
for v in range(len(vars)):
if ( var == vars[v] ):
errors[res,v+1] = line.split()[1]
os.system("rm {}".format(outfile))
elif ( pproc_type == "diffsamedomain" ): # running diffsamedomain. No analytical sol ...
errors = np.empty([len(resolution)-1,len(vars)+1])
pltfile=[]
pltfilenext=[]
for res in range(len(resolution)-1):
case = resolution[res]
nextcase = resolution[res+1]
errors[res,0] = case
            # Get the diffsamedomain inputs: last solutions of the current
            # and next finer cases. These runs should have been run to the same final time
for f in os.listdir(run_dir):
if ( not fnmatch.fnmatch(f, '*old*')):
if (f.startswith("{}_plt_{}_".format(args.test_name,case))):
pltfile.append(f)
if (f.startswith("{}_plt_{}_".format(args.test_name,nextcase))):
pltfilenext.append(f)
pltfile.sort()
pltfilenext.sort()
outfile = "error_{}.analysis.out".format(case)
os.system("./{} infile1={} reffile={} > {}".format(os.path.basename(args.pproc_exe), pltfile[-1], pltfilenext[-1], outfile))
pltfile.clear()
pltfilenext.clear()
# Extract errors on each variable
with open(outfile) as fp:
for i, line in enumerate(fp):
if (i >= 5):
var = line.split(":")[0]
for v in range(len(vars)):
if ( var.split(" ")[0] == vars[v] ):
errors[res,v+1] = line.split(":")[1]
os.system("rm {}".format(outfile))
else:
print("Wrong pproc_type: {}. should be either fcompare or diffsamedomain".format(pproc_type))
return
print(errors)
# Plot data
plotdata(errors, args.test_name, vars)
writetex(errors, args.test_name, vars)
writeRegTestFile(errors, args.test_name, vars)
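# plotdata: log-log plot of the error norms against resolution, together with
# a second-order reference line (error divided by 4 for each doubling of the
# resolution).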
def plotdata(data, test_name, vars):
# Evaluate 2nd order slope
snd_order = data[:,1]*1.05
for i in range(1,len(data[:,1])):
snd_order[i] = snd_order[i-1]/np.exp(2.0*np.log(2.0))
for i in range(0, len(vars)):
plt.plot(data[:,0], data[:,i+1], label="{}".format(vars[i]))
plt.plot(data[:,0], snd_order[:],linestyle='--',color='k', label='2nd-order')
plt.xlabel("Resolution")
plt.ylabel("Error L2norm")
plt.xscale("log")
plt.yscale("log")
plt.grid(which='both',color='k', linestyle=':', linewidth=1)
plt.legend(bbox_to_anchor=(0.9, 0.9), loc=1, borderaxespad=0.)
plt.savefig("Convergence_{}.png".format(test_name))
def
|
(data, test_name, vars):
    # Evaluate the observed convergence order: p = log(E_coarse/E_fine) / log(2)
conv_order = np.empty([len(data[:,0])-1,len(vars)])
for v in range(len(vars)):
for i in range(len(conv_order[:,0])):
conv_order[i,v] = np.log(data[i,v+1]/data[i+1,v+1])/np.log(2.0)
fout = open("ConvTable_{}.tex".format(test_name), "w")
fout.write("\\begin{table}[ht!]\n")
fout.write("\centering\n")
fout.write("\\begin{tabular}{l|")
for i in range(len(conv_order[:,0])):
fout.write("c ")
fout.write("}\n")
fout.write("\hline\n")
fout.write("Variable ")
for i in range(len(conv_order[:,0])):
fout.write("& {}/{} ".format(data[i+1,0],data[i,0]))
fout.write("\\\\\n\hline\hline\n")
for v in range(len(vars)):
fout.write("{} ".format(vars[v].replace("_","\_")))
for i in range(len(conv_order[:,0])):
fout.write("& {:.3f} ".format(conv_order[i,v]))
fout.write("\\\\\n")
fout.write("\end{tabular}\n")
fout.write("\caption{PeleLM convergence order}\n")
fout.write("\label{table:conv}\n")
fout.write("\end{table}\n")
fout.close()
def writeRegTestFile(data, test_name, vars):
    # Evaluate the observed convergence order: p = log(E_coarse/E_fine) / log(2)
conv_order = np.empty([len(data[:,0])-1,len(vars)])
for v in range(len(vars)):
for i in range(len(conv_order[:,0])):
conv_order[i,v] = np.log(data[i,v+1]/data[i+1,v+1])/np.log(2.0)
fout = open("Convergence_{}.dat".format(test_name), "w")
fout.write(" Variables ")
for i in range(len(conv_order[:,0])):
fout.write(" {}/{} ".format(data[i+1,0],data[i,0]))
fout.write("\n")
for v in range(len(vars)):
fout.write("{} ".format(vars[v]))
for i in range(len(conv_order[:,0])):
fout.write(" {:.3f} ".format(conv_order[i,v]))
fout.write("\n")
fout.close()
def parse_args(arg_string=None):
parser = argparse.ArgumentParser(description=USAGE)
parser.add_argument("--test_name", type=str, default="None", metavar="test-name",
help="name of the test. Default = current folder name")
parser.add_argument("--pproc_exe", type=str, default="None", metavar="pproc.exe",
help="path to the executable required for the analysis.")
if not arg_string is None:
args, unknown = parser.parse_known_args(arg_string)
else:
args, unknown = parser.parse_known_args()
return args
if __name__ == "__main__":
arg_string_prepend = ["--pproc_exe"]+sys.argv[1:]
args = parse_args(arg_string=arg_string_prepend)
pproc(args)
|
writetex
|
test_backend_endpoint_policy.py
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from six.moves import range
from testtools import matchers
from keystone import exception
from keystone.tests import unit
class PolicyAssociationTests(object):
def _assert_correct_policy(self, endpoint, policy):
ref = (
self.endpoint_policy_api.get_policy_for_endpoint(endpoint['id']))
self.assertEqual(policy['id'], ref['id'])
def _assert_correct_endpoints(self, policy, endpoint_list):
endpoint_id_list = [ep['id'] for ep in endpoint_list]
endpoints = (
self.endpoint_policy_api.list_endpoints_for_policy(policy['id']))
self.assertThat(endpoints, matchers.HasLength(len(endpoint_list)))
for endpoint in endpoints:
self.assertIn(endpoint['id'], endpoint_id_list)
def load_sample_data(self):
"""Create sample data to test policy associations.
The following data is created:
- 3 regions, in a hierarchy, 0 -> 1 -> 2 (where 0 is top)
- 3 services
- 6 endpoints, 2 in each region, with a mixture of services:
0 - region 0, Service 0
1 - region 0, Service 1
2 - region 1, Service 1
3 - region 1, Service 2
4 - region 2, Service 2
5 - region 2, Service 0
"""
def new_endpoint(region_id, service_id):
endpoint = unit.new_endpoint_ref(interface='test',
region_id=region_id,
service_id=service_id,
url='/url')
self.endpoint.append(self.catalog_api.create_endpoint(
endpoint['id'], endpoint))
self.policy = []
self.endpoint = []
self.service = []
self.region = []
parent_region_id = None
for i in range(3):
policy = unit.new_policy_ref()
self.policy.append(self.policy_api.create_policy(policy['id'],
policy))
service = unit.new_service_ref()
self.service.append(self.catalog_api.create_service(service['id'],
service))
region = unit.new_region_ref(parent_region_id=parent_region_id)
# Link the regions together as a hierarchy, [0] at the top
parent_region_id = region['id']
self.region.append(self.catalog_api.create_region(region))
new_endpoint(self.region[0]['id'], self.service[0]['id'])
new_endpoint(self.region[0]['id'], self.service[1]['id'])
new_endpoint(self.region[1]['id'], self.service[1]['id'])
new_endpoint(self.region[1]['id'], self.service[2]['id'])
new_endpoint(self.region[2]['id'], self.service[2]['id'])
new_endpoint(self.region[2]['id'], self.service[0]['id'])
def test_policy_to_endpoint_association_crud(self):
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
self.endpoint_policy_api.check_policy_association(
self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
self.endpoint_policy_api.delete_policy_association(
self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.check_policy_association,
self.policy[0]['id'],
endpoint_id=self.endpoint[0]['id'])
def test_overwriting_policy_to_endpoint_association(self):
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
self.endpoint_policy_api.create_policy_association(
self.policy[1]['id'], endpoint_id=self.endpoint[0]['id'])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.check_policy_association,
self.policy[0]['id'],
endpoint_id=self.endpoint[0]['id'])
self.endpoint_policy_api.check_policy_association(
self.policy[1]['id'], endpoint_id=self.endpoint[0]['id'])
def test_invalid_policy_to_endpoint_association(self):
self.assertRaises(exception.InvalidPolicyAssociation,
self.endpoint_policy_api.create_policy_association,
self.policy[0]['id'])
self.assertRaises(exception.InvalidPolicyAssociation,
self.endpoint_policy_api.create_policy_association,
self.policy[0]['id'],
endpoint_id=self.endpoint[0]['id'],
region_id=self.region[0]['id'])
self.assertRaises(exception.InvalidPolicyAssociation,
self.endpoint_policy_api.create_policy_association,
self.policy[0]['id'],
endpoint_id=self.endpoint[0]['id'],
service_id=self.service[0]['id'])
self.assertRaises(exception.InvalidPolicyAssociation,
self.endpoint_policy_api.create_policy_association,
self.policy[0]['id'],
region_id=self.region[0]['id'])
def test_policy_to_explicit_endpoint_association(self):
# Associate policy 0 with endpoint 0
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
self._assert_correct_policy(self.endpoint[0], self.policy[0])
self._assert_correct_endpoints(self.policy[0], [self.endpoint[0]])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.get_policy_for_endpoint,
uuid.uuid4().hex)
def test_policy_to_service_association(self):
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], service_id=self.service[0]['id'])
self.endpoint_policy_api.create_policy_association(
self.policy[1]['id'], service_id=self.service[1]['id'])
# Endpoints 0 and 5 are part of service 0
self._assert_correct_policy(self.endpoint[0], self.policy[0])
self._assert_correct_policy(self.endpoint[5], self.policy[0])
self._assert_correct_endpoints(
self.policy[0], [self.endpoint[0], self.endpoint[5]])
# Endpoints 1 and 2 are part of service 1
self._assert_correct_policy(self.endpoint[1], self.policy[1])
self._assert_correct_policy(self.endpoint[2], self.policy[1])
self._assert_correct_endpoints(
self.policy[1], [self.endpoint[1], self.endpoint[2]])
def test_policy_to_region_and_service_association(self):
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], service_id=self.service[0]['id'],
region_id=self.region[0]['id'])
self.endpoint_policy_api.create_policy_association(
self.policy[1]['id'], service_id=self.service[1]['id'],
region_id=self.region[1]['id'])
self.endpoint_policy_api.create_policy_association(
self.policy[2]['id'], service_id=self.service[2]['id'],
region_id=self.region[2]['id'])
# Endpoint 0 is in region 0 with service 0, so should get policy 0
self._assert_correct_policy(self.endpoint[0], self.policy[0])
# Endpoint 5 is in Region 2 with service 0, so should also get
# policy 0 by searching up the tree to Region 0
self._assert_correct_policy(self.endpoint[5], self.policy[0])
# Looking the other way round, policy 2 should only be in use by
# endpoint 4, since that's the only endpoint in region 2 with the
# correct service
self._assert_correct_endpoints(
self.policy[2], [self.endpoint[4]])
# Policy 1 should only be in use by endpoint 2, since that's the only
# endpoint in region 1 (and region 2 below it) with the correct service
self._assert_correct_endpoints(
self.policy[1], [self.endpoint[2]])
# Policy 0 should be in use by endpoint 0, as well as 5 (since 5 is
# of the correct service and in region 2 below it)
self._assert_correct_endpoints(
self.policy[0], [self.endpoint[0], self.endpoint[5]])
def test_delete_association_by_entity(self):
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
self.endpoint_policy_api.delete_association_by_endpoint(
self.endpoint[0]['id'])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.check_policy_association,
self.policy[0]['id'],
endpoint_id=self.endpoint[0]['id'])
# Make sure deleting it again is silent - since this method is used
# in response to notifications by the controller.
self.endpoint_policy_api.delete_association_by_endpoint(
self.endpoint[0]['id'])
# Now try with service - ensure both combined region & service
# associations and explicit service ones are removed
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], service_id=self.service[0]['id'],
region_id=self.region[0]['id'])
self.endpoint_policy_api.create_policy_association(
self.policy[1]['id'], service_id=self.service[0]['id'],
region_id=self.region[1]['id'])
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], service_id=self.service[0]['id'])
self.endpoint_policy_api.delete_association_by_service(
self.service[0]['id'])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.check_policy_association,
self.policy[0]['id'],
service_id=self.service[0]['id'],
region_id=self.region[0]['id'])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.check_policy_association,
self.policy[1]['id'],
service_id=self.service[0]['id'],
region_id=self.region[1]['id'])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.check_policy_association,
self.policy[0]['id'],
service_id=self.service[0]['id'])
# Finally, check delete by region
self.endpoint_policy_api.create_policy_association(
self.policy[0]['id'], service_id=self.service[0]['id'],
region_id=self.region[0]['id'])
self.endpoint_policy_api.delete_association_by_region(
self.region[0]['id'])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.check_policy_association,
self.policy[0]['id'],
service_id=self.service[0]['id'],
region_id=self.region[0]['id'])
self.assertRaises(exception.NotFound,
self.endpoint_policy_api.check_policy_association,
self.policy[0]['id'],
|
service_id=self.service[0]['id'])
|
|
overlay.rs
|
use std::{
collections::{btree_map, BTreeMap, HashSet},
iter::{Iterator, Peekable},
};
use oasis_core_runtime::storage::mkvs;
use super::{NestedStore, Store};
/// An overlay store which keeps values locally until explicitly committed.
pub struct OverlayStore<S: Store> {
parent: S,
overlay: BTreeMap<Vec<u8>, Vec<u8>>,
dirty: HashSet<Vec<u8>>,
}
impl<S: Store> OverlayStore<S> {
/// Create a new overlay store.
pub fn new(parent: S) -> Self {
Self {
parent,
overlay: BTreeMap::new(),
dirty: HashSet::new(),
}
}
}
impl<S: Store> NestedStore for OverlayStore<S> {
type Inner = S;
fn commit(mut self) -> Self::Inner {
// Insert all items present in the overlay.
for (key, value) in self.overlay {
self.dirty.remove(&key);
self.parent.insert(&key, &value);
}
// Any remaining dirty items must have been removed.
for key in self.dirty {
self.parent.remove(&key);
}
self.parent
}
fn has_pending_updates(&self) -> bool {
!self.dirty.is_empty()
}
}
impl<S: Store> Store for OverlayStore<S> {
fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
// For dirty values, check the overlay.
if self.dirty.contains(key) {
return self.overlay.get(key).cloned();
}
// Otherwise fetch from parent store.
self.parent.get(key)
}
fn insert(&mut self, key: &[u8], value: &[u8]) {
self.overlay.insert(key.to_owned(), value.to_owned());
self.dirty.insert(key.to_owned());
}
fn remove(&mut self, key: &[u8]) {
// For dirty values, remove from the overlay.
if self.dirty.contains(key) {
self.overlay.remove(key);
return;
}
        // We don't need the previous value: marking the key as dirty without an
        // overlay entry records a pending removal that is applied on commit.
self.dirty.insert(key.to_owned());
}
fn iter(&self) -> Box<dyn mkvs::Iterator + '_> {
Box::new(OverlayStoreIterator::new(self))
}
}
/// An iterator over the `OverlayStore` which merges the parent and overlay
/// iterators, giving precedence to overlay entries and skipping parent keys
/// that have been marked dirty.
pub(crate) struct OverlayStoreIterator<'store, S: Store> {
store: &'store OverlayStore<S>,
parent: Box<dyn mkvs::Iterator + 'store>,
overlay: Peekable<btree_map::Range<'store, Vec<u8>, Vec<u8>>>,
overlay_valid: bool,
key: Option<Vec<u8>>,
value: Option<Vec<u8>>,
}
impl<'store, S: Store> OverlayStoreIterator<'store, S> {
fn new(store: &'store OverlayStore<S>) -> Self {
Self {
store,
parent: store.parent.iter(),
overlay: store.overlay.range(vec![]..).peekable(),
overlay_valid: true,
key: None,
value: None,
}
}
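    // Advance the parent iterator past any keys shadowed by the overlay (dirty
    // keys), then point `key`/`value` at whichever of the two iterators
    // currently has the smaller key, with the overlay winning ties.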
fn update_iterator_position(&mut self) {
// Skip over any dirty entries from the parent iterator.
loop {
if !self.parent.is_valid()
|| !self
.store
.dirty
.contains(self.parent.get_key().as_ref().expect("parent.is_valid"))
{
break;
}
self.parent.next();
}
let i_key = self.parent.get_key();
let o_item = self.overlay.peek();
self.overlay_valid = o_item.is_some();
if self.parent.is_valid()
&& (!self.overlay_valid
|| i_key.as_ref().expect("parent.is_valid") < o_item.expect("overlay_valid").0)
{
// Key of parent iterator is smaller than the key of the overlay iterator.
self.key = i_key.clone();
self.value = self.parent.get_value().clone();
} else if self.overlay_valid {
// Key of overlay iterator is smaller than or equal to the key of the parent iterator.
|
self.value = Some(o_value.to_vec());
} else {
// Both iterators are invalid.
self.key = None;
self.value = None;
}
}
fn next(&mut self) {
if !self.overlay_valid
|| (self.parent.is_valid()
&& self.parent.get_key().as_ref().expect("parent.is_valid")
<= self.overlay.peek().expect("overlay_valid").0)
{
            // Key of parent iterator is smaller than or equal to the key of the overlay iterator.
self.parent.next();
} else {
// Key of parent iterator is greater than the key of the overlay iterator.
self.overlay.next();
}
self.update_iterator_position();
}
}
impl<'store, S: Store> Iterator for OverlayStoreIterator<'store, S> {
type Item = (Vec<u8>, Vec<u8>);
fn next(&mut self) -> Option<Self::Item> {
use mkvs::Iterator;
if !self.is_valid() {
return None;
}
let key = self.key.as_ref().expect("iterator is valid").clone();
let value = self.value.as_ref().expect("iterator is valid").clone();
OverlayStoreIterator::next(self);
Some((key, value))
}
}
impl<'store, S: Store> mkvs::Iterator for OverlayStoreIterator<'store, S> {
fn set_prefetch(&mut self, prefetch: usize) {
self.parent.set_prefetch(prefetch)
}
fn is_valid(&self) -> bool {
// If either iterator is valid, the merged iterator is valid.
self.parent.is_valid() || self.overlay_valid
}
fn error(&self) -> &Option<anyhow::Error> {
self.parent.error()
}
fn rewind(&mut self) {
self.seek(&[]);
}
fn seek(&mut self, key: &[u8]) {
self.parent.seek(key);
self.overlay = self.store.overlay.range(key.to_vec()..).peekable();
self.update_iterator_position();
}
fn get_key(&self) -> &Option<mkvs::Key> {
&self.key
}
fn get_value(&self) -> &Option<Vec<u8>> {
&self.value
}
fn next(&mut self) {
OverlayStoreIterator::next(self)
}
}
|
let (o_key, o_value) = o_item.expect("overlay_valid");
self.key = Some(o_key.to_vec());
|
mod.rs
|
use std::{collections::HashMap, str::from_utf8};
use crate::{
derivation::basic::Basic,
derivation::self_addressing::SelfAddressing,
derivation::self_signing::SelfSigning,
error::Error,
event::event_data::inception::InceptionEvent,
event::{
event_data::interaction::InteractionEvent,
sections::{
seal::{DigestSeal, Seal},
WitnessConfig,
},
},
event::{
event_data::receipt::ReceiptTransferable, event_data::rotation::RotationEvent,
sections::seal::EventSeal,
},
event::{
event_data::EventData,
sections::{nxt_commitment, InceptionWitnessConfig, KeyConfig},
Event, EventMessage, SerializationFormats,
},
event_message::parse::{signed_event_stream, Deserialized},
event_message::SignedEventMessage,
log::EventLog,
prefix::AttachedSignaturePrefix,
prefix::IdentifierPrefix,
prefix::Prefix,
signer::CryptoBox,
state::IdentifierState,
};
mod test;
pub struct Keri {
key_manager: CryptoBox,
kel: EventLog,
state: IdentifierState,
receipts: HashMap<u64, Vec<SignedEventMessage>>,
escrow_sigs: Vec<SignedEventMessage>,
other_instances: HashMap<String, IdentifierState>,
}
impl Keri {
// incept a state and keys
pub fn new() -> Result<Keri, Error> {
let key_manager = CryptoBox::new()?;
let icp = InceptionEvent::new(
KeyConfig::new(
vec![Basic::Ed25519.derive(key_manager.public_key())],
Some(nxt_commitment(
1,
&[Basic::Ed25519.derive(key_manager.next_pub_key.clone())],
SelfAddressing::Blake3_256,
)),
Some(1),
),
None,
None,
)
.incept_self_addressing(SelfAddressing::Blake3_256, SerializationFormats::JSON)?;
let sigged = icp.sign(vec![AttachedSignaturePrefix::new(
SelfSigning::Ed25519Sha512,
key_manager.sign(&icp.serialize()?)?,
0,
)]);
let mut log = EventLog::new();
let s0 = IdentifierState::default().apply(&sigged)?;
s0.current
.verify(&sigged.event_message.serialize()?, &sigged.signatures)?;
log.commit(sigged)?;
Ok(Keri {
kel: log,
receipts: HashMap::new(),
state: s0,
key_manager,
escrow_sigs: vec![],
other_instances: HashMap::new(),
})
}
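    /// Rotate to the next key pair and append a signed rotation event to the
    /// key event log, updating the local identifier state.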
pub fn rotate(&mut self) -> Result<SignedEventMessage, Error> {
self.key_manager = self.key_manager.rotate()?;
let ev = {
Event {
prefix: self.state.prefix.clone(),
|
vec![Basic::Ed25519.derive(self.key_manager.public_key())],
Some(nxt_commitment(
1,
&[Basic::Ed25519.derive(self.key_manager.next_pub_key.clone())],
SelfAddressing::Blake3_256,
)),
Some(1),
),
witness_config: WitnessConfig::default(),
data: vec![],
}),
}
.to_message(SerializationFormats::JSON)?
};
let signature = self.key_manager.sign(&ev.serialize()?)?;
let rot = ev.sign(vec![AttachedSignaturePrefix::new(
SelfSigning::Ed25519Sha512,
signature,
0,
)]);
self.state = self.state.clone().apply(&rot)?;
self.state
.current
.verify(&rot.event_message.serialize()?, &rot.signatures)?;
self.kel.commit(rot.clone())?;
Ok(rot)
}
pub fn make_ixn(&mut self, payload: &str) -> Result<SignedEventMessage, Error> {
let dig_seal = DigestSeal {
dig: SelfAddressing::Blake3_256.derive(payload.as_bytes()),
};
let ev = Event {
prefix: self.state.prefix.clone(),
sn: self.state.sn + 1,
event_data: EventData::Ixn(InteractionEvent {
previous_event_hash: SelfAddressing::Blake3_256.derive(&self.state.last),
data: vec![Seal::Digest(dig_seal)],
}),
}
.to_message(SerializationFormats::JSON)?;
let signature = self.key_manager.sign(&ev.serialize()?)?;
let ixn = ev.sign(vec![AttachedSignaturePrefix::new(
SelfSigning::Ed25519Sha512,
signature,
0,
)]);
self.state = self.state.clone().apply(&ixn)?;
self.state
.current
.verify(&ixn.event_message.serialize()?, &ixn.signatures)?;
self.kel.commit(ixn.clone())?;
Ok(ixn)
}
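    /// Process an incoming stream of signed events: inception events create a
    /// newly tracked identifier state (replying with our own inception event
    /// the first time that prefix is seen), other events update the tracked
    /// state, and transferable receipts are verified against the tracked
    /// validator state. Returns the concatenated serialized responses.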
pub fn process_events(&mut self, msg: &[u8]) -> Result<String, Error> {
let events = signed_event_stream(msg)
.map_err(|_| Error::DeserializationError)?
.1;
let mut response: Vec<SignedEventMessage> = vec![];
for dev in events {
match dev {
Deserialized::Event(ev) => match ev.event.event.event.event_data {
EventData::Icp(_) => {
let ev_prefix = ev.event.event.event.prefix.to_str();
let state = IdentifierState::default().apply(&ev.event.event)?;
state.current.verify(ev.event.raw, &ev.signatures)?;
if !self.other_instances.contains_key(&ev_prefix) {
if let Some(icp) = self.kel.get_last() {
response.push(icp);
}
}
self.other_instances.insert(ev_prefix.clone(), state);
let rct = self.make_rct(ev.event.event)?;
response.push(rct);
}
_ => {
let prefix_str = ev.event.event.event.prefix.to_str();
let state = self
.other_instances
.remove(&prefix_str)
.unwrap_or(IdentifierState::default());
self.other_instances
.insert(prefix_str.clone(), state.apply(&ev.event.event)?);
let rct = self.make_rct(ev.event.event)?;
response.push(rct);
}
},
Deserialized::Vrc(r) => match r.event_message.event.event_data {
EventData::Vrc(ref rct) => {
let prefix_str = rct.validator_seal.prefix.to_str();
let validator = self.other_instances.get(&prefix_str).unwrap().clone();
self.process_receipt(validator, r).unwrap();
}
// NOTE should never happen
_ => Err(Error::SemanticError("Incorrect Receipt Structure".into()))?,
},
Deserialized::Rct(_) => todo!(),
}
}
let str_res = response
.iter()
.map(|ev| from_utf8(&ev.serialize().unwrap()).unwrap().to_string())
.collect::<Vec<_>>()
.concat();
Ok(str_res)
}
// take a receipt made by validator, verify it and add to receipts or escrow
fn process_receipt(
&mut self,
validator: IdentifierState,
sigs: SignedEventMessage,
) -> Result<(), Error> {
match sigs.event_message.event.event_data.clone() {
EventData::Vrc(rct) => {
let event = self.kel.get(sigs.event_message.event.sn)?;
// This logic can in future be moved to the correct place in the Kever equivalent here
// receipt pref is the ID who made the event being receipted
if sigs.event_message.event.prefix == self.state.prefix
// dig is the digest of the event being receipted
&& rct.receipted_event_digest
== rct
.receipted_event_digest
.derivation
.derive(&event.event_message.serialize()?)
// seal pref is the pref of the validator
&& rct.validator_seal.prefix == validator.prefix
{
if rct.validator_seal.event_digest
== rct
.validator_seal
.event_digest
.derivation
.derive(&validator.last)
{
// seal dig is the digest of the last establishment event for the validator, verify the rct
validator
.current
.verify(&event.event_message.serialize()?, &sigs.signatures)?;
self.receipts
.entry(sigs.event_message.event.sn)
.or_insert_with(|| vec![])
.push(sigs);
} else {
// escrow the seal
self.escrow_sigs.push(sigs)
}
Ok(())
} else {
Err(Error::SemanticError("incorrect receipt binding".into()))
}
}
_ => Err(Error::SemanticError("not a receipt".into())),
}
}
fn make_rct(&self, event: EventMessage) -> Result<SignedEventMessage, Error> {
let ser = event.serialize()?;
let signature = self.key_manager.sign(&ser)?;
Ok(Event {
prefix: event.event.prefix,
sn: event.event.sn,
event_data: EventData::Vrc(ReceiptTransferable {
receipted_event_digest: SelfAddressing::Blake3_256.derive(&ser),
validator_seal: EventSeal {
prefix: self.state.prefix.clone(),
sn: self.state.sn,
event_digest: SelfAddressing::Blake3_256.derive(&self.state.last),
},
}),
}
.to_message(SerializationFormats::JSON)?
.sign(vec![AttachedSignaturePrefix::new(
SelfSigning::Ed25519Sha512,
signature,
0,
)]))
}
pub fn get_last_event(&self) -> String {
match self.kel.get_last() {
Some(ev) => from_utf8(&ev.serialize().unwrap()).unwrap().to_string(),
None => String::new(),
}
}
pub fn get_log_len(&self) -> usize {
self.kel.get_len()
}
pub fn get_state(&self) -> IdentifierState {
self.state.clone()
}
}
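// Illustrative usage sketch (not part of the original file). Names outside this file,
// such as the constructor `Keri::new` and the key-manager value providing `sign`/`rotate`,
// are assumptions:
//
//     let mut controller = Keri::new(key_manager)?;          // incept: builds and signs the ICP event
//     let _rot = controller.rotate()?;                        // rotate to the pre-committed next key
//     let _ixn = controller.make_ixn("some payload")?;        // anchor a payload digest via a seal
//     let _rcts = controller.process_events(&incoming_kel)?;  // verify foreign events, answer with receipts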
|
sn: self.state.sn + 1,
event_data: EventData::Rot(RotationEvent {
previous_event_hash: SelfAddressing::Blake3_256.derive(&self.state.last),
key_config: KeyConfig::new(
|
core.ts
|
import {LanguageCore} from '@taiga-ui/i18n/interfaces';
import {TUI_SPANISH_LANGUAGE_COUNTRIES} from './countries';
export const TUI_SPANISH_LANGUAGE_CORE: LanguageCore = {
months: [
'Enero',
'Febrero',
'Marzo',
'Abril',
'Mayo',
'Junio',
'Julio',
'Agosto',
'Septiembre',
'Octubre',
'Noviembre',
'Diciembre',
],
close: 'Cerrar',
nothingFoundMessage: 'Nada encontrado',
|
spinTexts: ['Previo', 'Siguiente'],
shortWeekDays: ['Lun', 'Mar', 'Mié', 'Jue', 'Vie', 'Sáb', 'Dom'],
// TODO: i18n replace with current language countries list
countries: TUI_SPANISH_LANGUAGE_COUNTRIES,
};
|
defaultErrorMessage: 'El valor es inválido',
|
notification.go
|
package influxdb
import (
"context"
"encoding/json"
)
// Updater is general interface to embed
// with any domain level interface to do crud related ops.
type Updater interface {
CRUDLogSetter
SetID(id ID)
SetOrgID(id ID)
SetName(name string)
SetStatus(status Status)
SetDescription(description string)
}
// Getter is a general getter interface
// to return id, orgID...
type Getter interface {
GetID() ID
GetCRUDLog() CRUDLog
GetOrgID() ID
GetName() string
GetStatus() Status
GetDescription() string
}
// NotificationRule is a *Query* of a *Status Bucket* that returns the *Status*.
// When warranted by the rules, sends a *Message* to a 3rd Party
// using the *Notification Endpoint* and stores a receipt in the *Notifications Bucket*.
type NotificationRule interface {
Valid() error
Type() string
json.Marshaler
Updater
Getter
SetOwnerID(id ID)
ClearPrivateData()
GetOwnerID() ID
SetTaskID(id ID)
GetTaskID() ID
GetEndpointID() ID
GetLimit() *Limit
GenerateFlux(NotificationEndpoint) (string, error)
}
// Limit means: do not notify more than <limit> times every <limitEvery> seconds.
// If set, limit cannot be empty.
type Limit struct {
Rate int `json:"limit,omitempty"`
// every seconds.
Every int `json:"limitEvery,omitempty"`
}
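// Illustrative example (not in the original source): a value such as
//
//	Limit{Rate: 5, Every: 3600}
//
// reads as "send at most 5 notifications per 3600 seconds".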
// NotificationRuleFilter represents a set of filter that restrict the returned notification rules.
type NotificationRuleFilter struct {
OrgID *ID
Organization *string
UserResourceMappingFilter
}
// QueryParams converts NotificationRuleFilter fields to URL query params.
func (f NotificationRuleFilter) QueryParams() map[string][]string {
qp := map[string][]string{}
if f.OrgID != nil {
qp["orgID"] = []string{f.OrgID.String()}
}
if f.Organization != nil {
qp["org"] = []string{*f.Organization}
}
return qp
}
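// Illustrative usage of QueryParams (hypothetical values):
//
//	org := "my-org"
//	f := NotificationRuleFilter{Organization: &org}
//	_ = f.QueryParams() // map[org:[my-org]]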
// NotificationRuleUpdate is the set of upgrade fields for patch request.
type NotificationRuleUpdate struct {
Name *string `json:"name,omitempty"`
Description *string `json:"description,omitempty"`
Status *Status `json:"status,omitempty"`
}
// Valid will verify if the NotificationRuleUpdate is valid.
func (n *NotificationRuleUpdate) Valid() error {
if n.Name != nil && *n.Name == "" {
return &Error{
Code: EInvalid,
Msg: "Notification Rule Name can't be empty",
}
}
if n.Description != nil && *n.Description == "" {
return &Error{
Code: EInvalid,
Msg: "Notification Rule Description can't be empty",
}
}
if n.Status != nil
|
return nil
}
// NotificationRuleStore represents a service for managing notification rule.
type NotificationRuleStore interface {
// UserResourceMappingService must be part of all NotificationRuleStore service,
// for create, search, delete.
UserResourceMappingService
// OrganizationService is needed for search filter
OrganizationService
// FindNotificationRuleByID returns a single notification rule by ID.
FindNotificationRuleByID(ctx context.Context, id ID) (NotificationRule, error)
// FindNotificationRules returns a list of notification rules that match filter and the total count of matching notification rules.
// Additional options provide pagination & sorting.
FindNotificationRules(ctx context.Context, filter NotificationRuleFilter, opt ...FindOptions) ([]NotificationRule, int, error)
// CreateNotificationRule creates a new notification rule and sets b.ID with the new identifier.
CreateNotificationRule(ctx context.Context, nr NotificationRule, userID ID) error
// UpdateNotificationRule updates a single notification rule.
// Returns the new notification rule after update.
UpdateNotificationRule(ctx context.Context, id ID, nr NotificationRule, userID ID) (NotificationRule, error)
// PatchNotificationRule updates a single notification rule with changeset.
// Returns the new notification rule state after update.
PatchNotificationRule(ctx context.Context, id ID, upd NotificationRuleUpdate) (NotificationRule, error)
// DeleteNotificationRule removes a notification rule by ID.
DeleteNotificationRule(ctx context.Context, id ID) error
}
|
{
if err := n.Status.Valid(); err != nil {
return err
}
}
|
settings.py
|
# -*- coding: utf-8 -*-
# Scrapy settings for amazon_scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'amazon_scraper'
SPIDER_MODULES = ['amazon_scraper.spiders']
NEWSPIDER_MODULE = 'amazon_scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'amazon_scraper_3 (+your@email.here)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 2
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'amazon_scraper.middlewares.AmazonScraperSpiderMiddleware': 543,
#}
|
#DOWNLOADER_MIDDLEWARES = {
# 'amazon_scraper.middlewares.AmazonScraperDownloaderMiddleware': 543,
#}
# DOWNLOADER_MIDDLEWARES = {
# 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
# 'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,
# }
# RANDOM_UA_TYPE = "desktop"
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'amazon_scraper.pipelines.AmazonScraperPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
|
hooks.rs
|
pub const HOOK_CREATION_ERROR: &str =
"Fatal error encountered while trying to create git hook files";
pub const NO_CONFIG_FILE_FOUND_ERROR_CODE: i32 = 3;
const MINIMUM_CLI_MAJOR_VERSION: i32 = 0;
const MINIMUM_CLI_MINOR_VERSION: i32 = 12;
const MINIMUM_CLI_PATCH_VERSION: i32 = 0;
const MINIMUM_CLI_VERSION_ALLOW_PRERELEASE: bool = false;
const VERSION: &str = env!("CARGO_PKG_VERSION");
const HOOK_FILE_TEMPLATE: &str = include_str!("hook_files/hook_script.sh");
const HOOK_CLI_SCRIPT_FILE_TEMPLATE: &str = include_str!("hook_files/cli.sh");
const HOOK_SEMVER_SCRIPT_FILE_TEMPLATE: &str = include_str!("hook_files/semver.sh");
const HOOK_NAMES: [&str; 19] = [
"applypatch-msg",
"pre-applypatch",
"post-applypatch",
"pre-commit",
"prepare-commit-msg",
"commit-msg",
"post-commit",
"pre-rebase",
"post-checkout",
"post-merge",
"pre-push",
"pre-receive",
"update",
"post-receive",
"post-update",
"push-to-checkout",
"pre-auto-gc",
"post-rewrite",
"sendemail-validate",
];
const CLI_SCRIPT_NAME: &str = "cli.sh";
const SEMVER_SCRIPT_NAME: &str = "semver.sh";
fn
|
() -> String {
String::from(HOOK_FILE_TEMPLATE).replace("{{VERSION}}", VERSION)
}
fn get_cli_script_file_contents() -> String {
let exit_code = &NO_CONFIG_FILE_FOUND_ERROR_CODE.to_string();
let minimum_major = &MINIMUM_CLI_MAJOR_VERSION.to_string();
let minimum_minor = &MINIMUM_CLI_MINOR_VERSION.to_string();
let minimum_patch = &MINIMUM_CLI_PATCH_VERSION.to_string();
let minimum_allow_pre = &MINIMUM_CLI_VERSION_ALLOW_PRERELEASE.to_string();
String::from(HOOK_CLI_SCRIPT_FILE_TEMPLATE)
.replace("{{VERSION}}", VERSION)
.replace("\n# shellcheck disable=SC2170,SC1083", "")
.replace("{{NO_CONFIG_FILE_EXIT_CODE}}", exit_code)
.replace("{{MINIMUM_MAJOR}}", minimum_major)
.replace("{{MINIMUM_MINOR}}", minimum_minor)
.replace("{{MINIMUM_PATCH}}", minimum_patch)
.replace("{{MINIMUM_ALLOW_PRE}}", minimum_allow_pre)
}
fn get_semver_script_file_contents() -> String {
String::from(HOOK_SEMVER_SCRIPT_FILE_TEMPLATE).replace("{{VERSION}}", VERSION)
}
fn get_file_path(root_directory_path: &str, hooks_directory: &str, file: &str) -> String {
format!("{}/{}/{}", root_directory_path, hooks_directory, file)
}
pub fn create_hook_files<F>(
write_file: F,
root_directory_path: &str,
hooks_directory: &str,
) -> Result<(), String>
where
F: Fn(&str, &str, bool) -> Result<(), String>,
{
let hook_file_contents = get_hook_file_contents();
for hook in HOOK_NAMES.iter() {
let path = get_file_path(root_directory_path, hooks_directory, hook);
if write_file(&path, &hook_file_contents, true).is_err() {
return Err(String::from(HOOK_CREATION_ERROR));
};
}
let cli_file_contents = get_cli_script_file_contents();
let cli_file_path = get_file_path(root_directory_path, hooks_directory, CLI_SCRIPT_NAME);
if write_file(&cli_file_path, &cli_file_contents, true).is_err() {
return Err(String::from(HOOK_CREATION_ERROR));
};
let semver_file_contents = get_semver_script_file_contents();
let semver_file_path = get_file_path(root_directory_path, hooks_directory, SEMVER_SCRIPT_NAME);
if write_file(&semver_file_path, &semver_file_contents, true).is_err() {
return Err(String::from(HOOK_CREATION_ERROR));
};
Ok(())
}
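// Illustrative call site (a sketch, not part of this file): `write_file` can be any closure
// matching `Fn(&str, &str, bool) -> Result<(), String>`; the no-op below only shows the shape.
//
//     create_hook_files(|_path, _contents, _make_executable| Ok(()), ".", ".git/hooks")?;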
#[cfg(test)]
#[path = "hooks_test.rs"]
mod hooks_tests;
|
get_hook_file_contents
|
astarte_test.go
|
/*
This file is part of Astarte.
Copyright 2020 Ispirata Srl
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e010
import (
"testing"
"time"
"github.com/astarte-platform/astarte-kubernetes-operator/pkg/apis"
operator "github.com/astarte-platform/astarte-kubernetes-operator/pkg/apis/api/v1alpha1"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
)
var (
retryInterval = time.Second * 10
timeout = time.Second * 420
cleanupRetryInterval = time.Second * 1
cleanupTimeout = time.Second * 5
)
func
|
(t *testing.T) {
astarteList := &operator.AstarteList{}
err := framework.AddToFrameworkScheme(apis.AddToScheme, astarteList)
if err != nil {
t.Fatalf("failed to add custom resource scheme to framework: %v", err)
}
// run subtests
t.Run("astarte-group", func(t *testing.T) {
t.Run("Cluster", AstarteCluster)
})
}
func AstarteCluster(t *testing.T) {
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup()
err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
if err != nil {
t.Fatalf("failed to initialize cluster resources: %v", err)
}
t.Log("Initialized cluster resources")
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
// get global framework variables
f := framework.Global
// wait for astarte-operator to be ready
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, "astarte-operator", 1, retryInterval, timeout)
if err != nil {
t.Fatal(err)
}
if err = astarteDeploy010Test(t, f, ctx); err != nil {
t.Fatal(err)
}
if err = astarteUpgradeTo011Test(t, f, ctx); err != nil {
t.Fatal(err)
}
if err = astarteDeleteTest(t, f, ctx); err != nil {
t.Fatal(err)
}
}
|
TestAstarte
|
list_response.go
|
package contracts
// ListResponse is a container for paginated filtered list items
type ListResponse struct {
Items []interface{} `json:"items"`
Pagination Pagination `json:"pagination"`
}
// Pagination indicates the current page, the size of the pages and total pages / items
|
Page int `json:"page"`
Size int `json:"size"`
TotalPages int `json:"totalPages"`
TotalItems int `json:"totalItems"`
}
|
type Pagination struct {
|
migrate.go
|
/*
Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
|
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package gcp contains the cloud provider specific implementations to manage machines
package gcp
import (
"encoding/json"
api "github.com/gardener/machine-controller-manager-provider-gcp/pkg/api/v1alpha1"
v1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/codes"
"github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/status"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// ProviderGCP string const to identify GCP provider
ProviderGCP = "GCP"
)
// fillUpMachineClass copies over the fields from GCPMachineClass to MachineClass
func fillUpMachineClass(gcpMachineClass *v1alpha1.GCPMachineClass, machineClass *v1alpha1.MachineClass) error {
disks := []*api.GCPDisk{}
for _, gcpDisk := range gcpMachineClass.Spec.Disks {
disk := &api.GCPDisk{
AutoDelete: gcpDisk.AutoDelete,
Boot: gcpDisk.Boot,
Image: gcpDisk.Image,
Interface: gcpDisk.Interface,
Labels: gcpDisk.Labels,
SizeGb: gcpDisk.SizeGb,
Type: gcpDisk.Type,
}
disks = append(disks, disk)
}
metaDataList := []*api.GCPMetadata{}
for _, gcpMetaData := range gcpMachineClass.Spec.Metadata {
metaData := &api.GCPMetadata{
Key: gcpMetaData.Key,
Value: gcpMetaData.Value,
}
metaDataList = append(metaDataList, metaData)
}
networkInterfaces := []*api.GCPNetworkInterface{}
for _, gcpNetworkInterface := range gcpMachineClass.Spec.NetworkInterfaces {
networkInterface := &api.GCPNetworkInterface{
DisableExternalIP: gcpNetworkInterface.DisableExternalIP,
Network: gcpNetworkInterface.Network,
Subnetwork: gcpNetworkInterface.Subnetwork,
}
networkInterfaces = append(networkInterfaces, networkInterface)
}
scheduling := api.GCPScheduling{
AutomaticRestart: gcpMachineClass.Spec.Scheduling.AutomaticRestart,
OnHostMaintenance: gcpMachineClass.Spec.Scheduling.OnHostMaintenance,
Preemptible: gcpMachineClass.Spec.Scheduling.Preemptible,
}
serviceAccounts := []api.GCPServiceAccount{}
for _, gcpServiceAccount := range gcpMachineClass.Spec.ServiceAccounts {
serviceAccount := api.GCPServiceAccount{
Email: gcpServiceAccount.Email,
Scopes: gcpServiceAccount.Scopes,
}
serviceAccounts = append(serviceAccounts, serviceAccount)
}
providerSpec := &api.GCPProviderSpec{
APIVersion: api.APIVersionV1alpha1,
CanIPForward: gcpMachineClass.Spec.CanIpForward,
DeletionProtection: gcpMachineClass.Spec.DeletionProtection,
Description: gcpMachineClass.Spec.Description,
Disks: disks,
Labels: gcpMachineClass.Spec.Labels,
MachineType: gcpMachineClass.Spec.MachineType,
Metadata: metaDataList,
NetworkInterfaces: networkInterfaces,
Region: gcpMachineClass.Spec.Region,
Scheduling: scheduling,
ServiceAccounts: serviceAccounts,
Tags: gcpMachineClass.Spec.Tags,
Zone: gcpMachineClass.Spec.Zone,
}
// Marshal providerSpec into Raw Bytes
providerSpecMarshal, err := json.Marshal(providerSpec)
if err != nil {
return status.Error(codes.Internal, err.Error())
}
// Migrate finalizers, labels, annotations
machineClass.Name = gcpMachineClass.Name
machineClass.Labels = gcpMachineClass.Labels
machineClass.Annotations = gcpMachineClass.Annotations
machineClass.Finalizers = gcpMachineClass.Finalizers
machineClass.ProviderSpec = runtime.RawExtension{
Raw: providerSpecMarshal,
}
machineClass.SecretRef = gcpMachineClass.Spec.SecretRef
machineClass.CredentialsSecretRef = gcpMachineClass.Spec.CredentialsSecretRef
machineClass.Provider = ProviderGCP
return nil
}
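// Illustrative (hypothetical) call site for the migration helper above:
//
//	var machineClass v1alpha1.MachineClass
//	if err := fillUpMachineClass(gcpMachineClass, &machineClass); err != nil {
//		// handle migration error
//	}
//	// machineClass.ProviderSpec.Raw now holds the marshalled GCPProviderSpec.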
|
You may obtain a copy of the License at
|
event.py
|
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implements
class IIndexEvent(Interface):
"""
A lower-level event involving the index
"""
class IIndexUpdate(Interface):
"""
A low-level event involving the index
"""
class IPackageEvent(IIndexEvent):
"""
An event involving a package
"""
path = Attribute('Path to package')
class IPackageAdded(IPackageEvent):
"""
A package is added to the repository
"""
|
A package is removed from the repository
"""
class IndexEvent(object):
implements(IIndexEvent)
def __init__(self, datafile, index, pkgdatas=None):
self.index = index
self.datafile = datafile
self.pkgdatas = pkgdatas
class IndexUpdate(IndexEvent):
implements(IIndexUpdate)
class PackageEvent(object):
"""
Base class for package events
"""
implements(IPackageEvent)
def __init__(self, index_manager, path=None, name=None, version=None):
self.name = name
self.version = version
self.im = index_manager
self.path = path
if self.name is None and self.path:
info = self.im.pkginfo_from_file(path, self.im.move_on_error)
self.name = info.name
self.version = info.version
class PackageAdded(PackageEvent):
implements(IPackageAdded)
def __init__(self, index_manager, path=None, name=None, version=None, rebuild_leaf=True):
super(PackageAdded, self).__init__(index_manager, path, name, version)
self.rebuild_leaf = rebuild_leaf
class PackageRemoved(PackageEvent):
implements(IPackageRemoved)
|
class IPackageRemoved(IPackageEvent):
"""
|
ParseResult.ts
|
import CommandError from '../CommandError';
import CommandInfo from '../Info/CommandInfo';
import {Interfaces} from '../Interfaces';
import TypeReaderResult from './TypeReaderResult';
import TypeReaderValue from './TypeReaderValue';
import ResultInterface = Interfaces.ResultInterface;
export default class
|
implements ResultInterface {
public static FromSuccess(argValues: TypeReaderResult[], paramValues: TypeReaderResult[]): ParseResult {
for (let i = 0; i < argValues.length; i++) {
if (!argValues.hasOwnProperty(i)) {
continue;
}
if (argValues[i].values.length > 1) {
return new ParseResult(argValues, paramValues, CommandError.MultipleMatches, 'Multiple matches found.');
}
}
for (let i = 0; i < paramValues.length; i++) {
if (!paramValues.hasOwnProperty(i)) {
continue;
}
if (paramValues[i].values.length > 1) {
return new ParseResult(argValues, paramValues, CommandError.MultipleMatches, 'Multiple matches found.');
}
}
return new ParseResult(argValues, paramValues);
}
public static FromMultipleSuccess(
argValues: TypeReaderValue[], paramValues?: TypeReaderValue[],
): ParseResult {
const argList: TypeReaderResult[] = [];
for (let i = 0; i < argValues.length; i++) {
argList[i] = TypeReaderResult.fromSuccess(argValues[i]);
}
let paramList: TypeReaderResult[] = [];
if (paramValues) {
for (let i = 0; i < paramValues.length; i++) {
paramList[i] = TypeReaderResult.fromSuccess(paramValues[i]);
}
}
return new ParseResult(argList, paramList);
}
public static FromError(error: CommandError, reason: string): ParseResult {
return new ParseResult(null, null, error, reason);
}
public isSuccess: boolean;
public Command: CommandInfo;
constructor(
public argValues: TypeReaderResult[],
public paramValues: TypeReaderResult[],
public error?: CommandError,
public errorReason?: string,
) {
this.isSuccess = !error;
}
};
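// Illustrative usage (a sketch; argResults/paramResults are assumed to come from the
// surrounding type-reader layer, they are not defined in this file):
//
//   const parsed = ParseResult.FromSuccess(argResults, paramResults);
//   if (!parsed.isSuccess) {
//       console.log(parsed.error, parsed.errorReason); // e.g. MultipleMatches
//   }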
|
ParseResult
|
dispenser_test.go
|
// Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lexer
import (
"reflect"
"strings"
"testing"
)
func TestDispenser_Val_Next(t *testing.T) {
input := `host:port
dir1 arg1
dir2 arg2 arg3
dir3`
d := NewDispenser("Testfile", strings.NewReader(input))
if val := d.Val(); val != "" {
t.Fatalf("Val(): Should return empty string when no token loaded; got '%s'", val)
}
assertNext := func(shouldLoad bool, expectedCursor int, expectedVal string) {
if loaded := d.Next(); loaded != shouldLoad {
t.Errorf("Next(): Expected %v but got %v instead (val '%s')", shouldLoad, loaded, d.Val())
}
if d.cursor != expectedCursor {
t.Errorf("Expected cursor to be %d, but was %d", expectedCursor, d.cursor)
}
if d.nesting != 0 {
t.Errorf("Nesting should be 0, was %d instead", d.nesting)
}
if val := d.Val(); val != expectedVal {
t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
}
}
assertNext(true, 0, "host:port")
assertNext(true, 1, "dir1")
assertNext(true, 2, "arg1")
assertNext(true, 3, "dir2")
assertNext(true, 4, "arg2")
assertNext(true, 5, "arg3")
assertNext(true, 6, "dir3")
// Note: This next test simply asserts existing behavior.
// If desired, we may wish to empty the token value after
// reading past the EOF. Open an issue if you want this change.
assertNext(false, 6, "dir3")
}
func TestDispenser_NextArg(t *testing.T) {
input := `dir1 arg1
dir2 arg2 arg3
dir3`
d := NewDispenser("Testfile", strings.NewReader(input))
assertNext := func(shouldLoad bool, expectedVal string, expectedCursor int) {
if d.Next() != shouldLoad {
t.Errorf("Next(): Should load token but got false instead (val: '%s')", d.Val())
}
if d.cursor != expectedCursor {
t.Errorf("Next(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor)
}
if val := d.Val(); val != expectedVal {
t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
}
}
assertNextArg := func(expectedVal string, loadAnother bool, expectedCursor int) {
if !d.NextArg() {
t.Error("NextArg(): Should load next argument but got false instead")
}
if d.cursor != expectedCursor {
t.Errorf("NextArg(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor)
}
if val := d.Val(); val != expectedVal {
t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
}
if !loadAnother {
if d.NextArg() {
t.Fatalf("NextArg(): Should NOT load another argument, but got true instead (val: '%s')", d.Val())
}
if d.cursor != expectedCursor {
t.Errorf("NextArg(): Expected cursor to remain at %d, but it was %d", expectedCursor, d.cursor)
}
}
}
assertNext(true, "dir1", 0)
assertNextArg("arg1", false, 1)
assertNext(true, "dir2", 2)
assertNextArg("arg2", true, 3)
assertNextArg("arg3", false, 4)
assertNext(true, "dir3", 5)
assertNext(false, "dir3", 5)
}
func TestDispenser_NextLine(t *testing.T) {
input := `host:port
dir1 arg1
dir2 arg2 arg3`
d := NewDispenser("Testfile", strings.NewReader(input))
assertNextLine := func(shouldLoad bool, expectedVal string, expectedCursor int) {
if d.NextLine() != shouldLoad {
t.Errorf("NextLine(): Should load token but got false instead (val: '%s')", d.Val())
}
if d.cursor != expectedCursor {
t.Errorf("NextLine(): Expected cursor to be %d, instead was %d", expectedCursor, d.cursor)
}
if val := d.Val(); val != expectedVal {
t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
}
}
assertNextLine(true, "host:port", 0)
assertNextLine(true, "dir1", 1)
assertNextLine(false, "dir1", 1)
d.Next() // arg1
assertNextLine(true, "dir2", 3)
assertNextLine(false, "dir2", 3)
d.Next() // arg2
assertNextLine(false, "arg2", 4)
d.Next() // arg3
assertNextLine(false, "arg3", 5)
}
func TestDispenser_NextBlock(t *testing.T) {
input := `foobar1 {
sub1 arg1
sub2
}
foobar2 {
}`
d := NewDispenser("Testfile", strings.NewReader(input))
assertNextBlock := func(shouldLoad bool, expectedCursor, expectedNesting int) {
if loaded := d.NextBlock(); loaded != shouldLoad {
t.Errorf("NextBlock(): Should return %v but got %v", shouldLoad, loaded)
}
if d.cursor != expectedCursor {
t.Errorf("NextBlock(): Expected cursor to be %d, was %d", expectedCursor, d.cursor)
}
if d.nesting != expectedNesting {
t.Errorf("NextBlock(): Nesting should be %d, not %d", expectedNesting, d.nesting)
}
}
assertNextBlock(false, -1, 0)
d.Next() // foobar1
assertNextBlock(true, 2, 1)
assertNextBlock(true, 3, 1)
assertNextBlock(true, 4, 1)
assertNextBlock(false, 5, 0)
d.Next() // foobar2
assertNextBlock(false, 8, 0) // empty block is as if it didn't exist
}
func TestDispenser_Args(t *testing.T) {
var s1, s2, s3 string
input := `dir1 arg1 arg2 arg3
dir2 arg4 arg5
dir3 arg6 arg7
dir4`
d := NewDispenser("Testfile", strings.NewReader(input))
d.Next() // dir1
// As many strings as arguments
if all := d.Args(&s1, &s2, &s3); !all {
t.Error("Args(): Expected true, got false")
}
if s1 != "arg1" {
t.Errorf("Args(): Expected s1 to be 'arg1', got '%s'", s1)
}
if s2 != "arg2" {
t.Errorf("Args(): Expected s2 to be 'arg2', got '%s'", s2)
}
if s3 != "arg3" {
t.Errorf("Args(): Expected s3 to be 'arg3', got '%s'", s3)
}
d.Next() // dir2
// More strings than arguments
if all := d.Args(&s1, &s2, &s3); all {
t.Error("Args(): Expected false, got true")
}
if s1 != "arg4" {
t.Errorf("Args(): Expected s1 to be 'arg4', got '%s'", s1)
}
if s2 != "arg5" {
t.Errorf("Args(): Expected s2 to be 'arg5', got '%s'", s2)
|
}
if s3 != "arg3" {
t.Errorf("Args(): Expected s3 to be unchanged ('arg3'), instead got '%s'", s3)
}
// (quick cursor check just for kicks and giggles)
if d.cursor != 6 {
t.Errorf("Cursor should be 6, but is %d", d.cursor)
}
d.Next() // dir3
// More arguments than strings
if all := d.Args(&s1); !all {
t.Error("Args(): Expected true, got false")
}
if s1 != "arg6" {
t.Errorf("Args(): Expected s1 to be 'arg6', got '%s'", s1)
}
d.Next() // dir4
// No arguments or strings
if all := d.Args(); !all {
t.Error("Args(): Expected true, got false")
}
// No arguments but at least one string
if all := d.Args(&s1); all {
t.Error("Args(): Expected false, got true")
}
}
func TestDispenser_RemainingArgs(t *testing.T) {
input := `dir1 arg1 arg2 arg3
dir2 arg4 arg5
dir3 arg6 { arg7
dir4`
d := NewDispenser("Testfile", strings.NewReader(input))
d.Next() // dir1
args := d.RemainingArgs()
if expected := []string{"arg1", "arg2", "arg3"}; !reflect.DeepEqual(args, expected) {
t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
}
d.Next() // dir2
args = d.RemainingArgs()
if expected := []string{"arg4", "arg5"}; !reflect.DeepEqual(args, expected) {
t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
}
d.Next() // dir3
args = d.RemainingArgs()
if expected := []string{"arg6"}; !reflect.DeepEqual(args, expected) {
t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
}
d.Next() // {
d.Next() // arg7
d.Next() // dir4
args = d.RemainingArgs()
if len(args) != 0 {
t.Errorf("RemainingArgs(): Expected %v, got %v", []string{}, args)
}
}
func TestDispenser_ArgErr_Err(t *testing.T) {
input := `dir1 {
}
dir2 arg1 arg2`
d := NewDispenser("Testfile", strings.NewReader(input))
d.cursor = 1 // {
if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "{") {
t.Errorf("ArgErr(): Expected an error message with { in it, but got '%v'", err)
}
d.cursor = 5 // arg2
if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "arg2") {
t.Errorf("ArgErr(): Expected an error message with 'arg2' in it; got '%v'", err)
}
err := d.Err("foobar")
if err == nil {
t.Fatalf("Err(): Expected an error, got nil")
}
if !strings.Contains(err.Error(), "Testfile:3") {
t.Errorf("Expected error message with filename:line in it; got '%v'", err)
}
if !strings.Contains(err.Error(), "foobar") {
t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err)
}
}
| |
temp_driver.py
|
import os, sys
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
from Communication.Modules.Driver_recv import DriverRecv
if __name__ == "__main__":
drv_recv = DriverRecv()
|
drv_recv.start()
|
|
MarketPlaceItems.js
|
import React, { Component } from 'react';
import ReactDOM from 'react-dom'
import update from 'immutability-helper';
import Swiper from 'react-id-swiper';
import SliderHStoreCapture from '../components/SliderHStoreCapture.js'
import $ from 'jquery';
window.jQuery = window.$ = $;
var ReactGA = require('react-ga');
ReactGA.initialize('UA-873612-18');
export default class
|
extends Component {
constructor(props){
super(props);
this.state = {
intervalId: 0,
data : [],
itemSelected: null,
zooom:false,
params :{
rebuildOnUpdate : true,
loop: true,
initialSlide: 0,
simulateTouch:true,
grabCursor: true,
pagination: {
el: '.swiper-pagination',
type: 'bullets',
clickable: true,
},
navigation: {
nextEl: '.swiper-button-next',
prevEl: '.swiper-button-prev'
}
},
modalopened:false,
priceModal:false
}
this.loadData = this.loadData.bind(this);
this.selectItem = this.selectItem.bind(this)
}
scrollStep() {
if (window.pageYOffset === 0) {
clearInterval(this.state.intervalId);
}
window.scroll(0, window.pageYOffset - 50);
}
componentDidMount() {
this.loadData()
}
componentDidUpdate(prevProps, prevState) {
}
selectItem(e, index){
let intervalId = setInterval(this.scrollStep.bind(this), 16.66);
let item = this.state.data[index]
this.setState({
intervalId: intervalId ,
itemSelected: index,
data: update(this.state.data,
{$splice: [[0, 0, item],[index + 1, 1]]
})
})
let reg = new RegExp("<.[^>]*>", "gi" );
let tuileName = this.state.data[index].title.replace(reg, " " );
var ga = ReactGA.ga();
ga('send', 'event', {
eventCategory: tuileName,
eventAction: 'click',
eventLabel: 'tuiles H-Store'
})
}
deselectItem(){
this.setState({
itemSelected: null,
overflow:null
})
}
modalEffect(index, e){
if (e.target.className !== 'close'){
this.setState(prevState => ({
zooom:index,
modalopened:true,
params: {
...prevState.params,
initialSlide: index,
}
}))
}
}
modalEffectOff(e){
if(e.target.className.indexOf('priceModal') !== -1) this.setState({priceModal:false})
if(e.target.className.indexOf('swiper-pagination-bullet') === 0
|| e.target.className.indexOf('swiper-button-next') === 0
|| e.target.className.indexOf('swiper-button-prev') ===0 ) return
if(e.target.className.indexOf('modalSlider') !== -1
|| e.target.className.indexOf('') !== -1) this.setState({modalopened:false})
this.setState({
zooom:false
})
}
loadData () {
let component = this
fetch( process.env.PUBLIC_URL + this.props.jsonPath, {
headers : {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
})
.then(
function(response) {
if (response.status !== 200) {
console.log('Looks like there was a problem. Status Code: ' +
response.status);
return;
}
// Examine the text in the response
response.json().then(function(data) {
component.setState({
data: data
})
return
});
}
)
.catch(function(err) {
console.log('Error', err);
});
}
render() {
const {data, itemSelected, overflow} = this.state
const jsonPath = '/json/dataBannerHSTORE.json'
return (
<div>
<div className={itemSelected === null ? "hStoreBand" : " tight hStoreBand"} >
<div className="marketplace__container">
<div className="hStoreBand-title">
<div className="hStoreBand-mainTitle">Bienvenue dans le <span className="secondarycolor">H-Store</span></div>
<div className="hStoreBand-phrase">Catalogue d’applications connectables à EBP Horizon</div>
</div>
<div className={itemSelected === null ? "hStoreBand-text secondarycolor" : " hide hStoreBand-text secondarycolor"} >
Créez une solution qui vous ressemble !
</div>
</div>
</div>
<div className={itemSelected === null ? "marketplace__edito" : " hide marketplace__edito"}>
<div className={itemSelected === null ? "hStore-triangle" : " hide hStore-triangle"}></div>
<div className="edito_content">
<div className={itemSelected === null ? "marketplace__container" : " wide marketplace__container"}>
<ul>
<li><i className="material-icons">check</i> Disponible pour les clients EBP Horizon OPEN</li>
<li><i className="material-icons">check</i> Complétez votre outil EBP Horizon avec de nouvelles fonctionnalités</li>
<li><i className="material-icons">check</i> Gagnez du temps pour être plus performant </li>
<li><i className="material-icons">check</i> Souscrivez et désactivez les applications depuis EBP Horizon, la connexion est automatique</li>
</ul>
</div>
</div>
</div>
<div className={this.state.modalopened ? "modalSlider opened": "modalSlider"} onClick={this.modalEffectOff.bind(this)}>
<div className="modal-header">
<button><span>×</span></button>
</div>
<SliderHStoreCapture data={this.state.data} params={this.state.params} itemSelected={itemSelected} jsonPath={this.props.jsonPath}/>
</div>
<div className={itemSelected === null ? "marketplace__container" : " wide marketplace__container"}>
<div
className={itemSelected === null ? "marketplace__items" : "marketplace__items opened"}
style={itemSelected === null ? null : overflow }
>
{!!data ? data.map((item, index) =>(
<div key={index}
className={item.connected ? "connected marketplace__item" : "marketplace__item"}
onClick={(e) => this.selectItem(e, index)}
>
<figure>
<div key={index}
className={item.disponible ? "item__img" : "bientot item__img"}>
<img alt="" src={item.src}/>
</div>
<div className="item__heading opened " dangerouslySetInnerHTML={{__html: item.title}}/>
<figcaption>
<div className="item__heading" dangerouslySetInnerHTML={{__html: item.title}}/>
<div className="item__description" dangerouslySetInnerHTML={{__html: item.description}} />
<div className="item__link">
Découvrir l’application
</div>
<div className="item__connection">
<span>{item.connected ? "Connecté" : "Se connecter"}</span>
</div>
<div className="item__disponible">
<span>{item.disponible ? "" : ""}</span>
</div>
</figcaption>
</figure>
</div>
)
) : null
}
<div className="close-window" onClick={this.deselectItem.bind(this)}>
<i className="material-icons">arrow_back</i>
</div>
</div>
<div className={itemSelected === null ? "marketplace__item__blurb" : "marketplace__item__blurb opened"}>
<div className="blurb__body">
{!!data ? data.map((item, index) =>(
<div
key={index}
className={index === itemSelected ? 'blurb__body__item selected' : "blurb__body__item"}
>
{!!data[0].blurb ? data[0].blurb.map((item, index) =>{
if(item.className === "utilisation"){
return(
<div
key={index}
className={`blurb__body__item__ ${item.className}`}
>
<div className="blurb__body__item__heading">
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
<div>{item.title}</div>
</div>
<div className="item__description__container">
{!!item.domElements ? item.domElements.map((elements, index) =>{
return(
<div key={index}>
<div>{elements.title}</div>
<ol className="square__list">
{!!elements ? elements.elementol.map((elementol, index) =>{
return(
<li key={index} dangerouslySetInnerHTML={{__html: elementol.innerText}}></li>
)
}
): null}
</ol>
</div>
)
}): null}
</div>
</div>
)
}
if(item.className === "support"){
return(
<div
key={index}
className={`blurb__body__item__ ${item.className}`}
>
<div className="blurb__body__item__heading">
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
<div>{item.title}</div>
</div>
<div className="item__description__container">
{!!item.domElements ? item.domElements.map((elements, index) =>{
return(
<div key={index}>
<div>{elements.title}</div>
<ul className="square__list">
{!!elements ? elements.element.map((element, index) =>{
return(
<li key={index} dangerouslySetInnerHTML={{__html: element.innerText}} />
)
}
): null}
</ul>
</div>
)
}): null}
{!!item.textelibrehtmlsupportclient ? item.textelibrehtmlsupportclient.map((elements, index) =>{
return(
<div key={index}>
<span className="supportdescription">
{!!elements ? elements.element.map((element, index) =>{
return(
<span key={index} className="supportdescription" dangerouslySetInnerHTML={{__html: element.description}}/>
)
}
): null}
</span>
</div>
)
}): null}
</div>
</div>
)
}
if(item.className === "qui"){
return(
<div
key={index}
className={`blurb__body__item__ ${item.className}`}
>
<div className="blurb__body__item__heading">
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
<div>{item.title}</div>
</div>
<div className="item__description__container">
{!!item.domElements ? item.domElements.map((elements, index) =>{
return(
<div key={index}>
{!!elements ? elements.element.map((element, index) =>{
return(
<div key={index} className="quidescriptionhtml__container" dangerouslySetInnerHTML={{__html: element.innerText}}/>
)
}
): null}
</div>
)
}): null}
<div className="quidescriptionhtml__container" dangerouslySetInnerHTML={{__html: item.quidescription}}/>
</div>
</div>
)
}
if(item.className === "price"){
return(
<div
key={index}
className={`blurb__body__item__ ${item.className}`}
>
<div className="blurb__body__item__heading">
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
<div>{item.title}</div>
</div>
<div className="item__description__container">
{!!item.domElements ? item.domElements.map((elements, index) =>{
return(
<div key={index} className="price__container">
{!!elements ? elements.element.map((element, index) =>{
/**************************************/
if(element.className=== 'item__captures'){
return(
<div key={index}>
<div className="img___zoomed" onClick={()=>this.setState({priceModal:true})}>
<img key={index} alt="" src={element.element[0].src}/>
</div>
<div
className={this.state.priceModal ? "priceModal opened": "priceModal"}
onClick={this.modalEffectOff.bind(this)}
>
<div className="modal-header" onClick={()=>this.setState({priceModal:false})}><button><span>×</span></button></div>
<div className="img___zoomed">
<img key={index} alt="" src={element.element[0].src}/>
</div>
</div>
</div>
)
}
if(element.className=== 'description'){
return(
<div key={index} className="description" dangerouslySetInnerHTML={{__html: element.innerText}}/>
)
}
if(element.className=== 'price__container'){
return(
<div key={index} className="price__container" dangerouslySetInnerHTML={{__html: element.innerText}}/>
)
}
}
): null}
</div>
)
}
):null}
</div>
</div>
)
}
if(item.className === "benefit"){
return(
<div
key={index}
className={`blurb__body__item__ ${item.className}`}
>
<div className="blurb__body__item__heading">
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
<div>{item.title}</div>
</div>
<div className="item__description__container">
{!!item.domElements ? item.domElements.map((elements, index) =>{
return(
<div key={index} className="blurb__pictos">
{!!elements ? elements.element.map((element, index) =>(
<figure key={index} className="blurb__picto">
<img alt="" src={element.picto}/>
<figcaption>
{element.innerText}
</figcaption>
</figure>
)
): null}
</div>
)
}
):null}
</div>
</div>
)
}
if(item.className === "what"){
return(
<div
key={index}
className={`blurb__body__item__ ${item.className}`}
>
<div className="blurb__body__item__heading">
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
<div>{item.title}</div>
</div>
<div className="item__description__container">
{!!item.domElements ? item.domElements.map((elements, index) =>{
return(
<div key={index} className={elements['className']}>
{!!elements.element ? elements.element.map((element, index)=> {
if(!!element.src){
let src = element.src
let ext = src.substring(src.lastIndexOf('.') + 1)
let exts =['jpg', 'jpeg', 'png', 'svg']
if(exts.indexOf(ext) !== -1){
return(
<div key={index}
className={this.state.zooom === index ? ' blurb__item__img' : "blurb__item__img"}
>
<div>
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
</div>
<div className="img___zoomed">
<div className="overlay" onClick={this.modalEffect.bind(this, index)}></div>
<img alt="" src={src}/>
</div>
</div>
)
}
else{
return(
<div key={index}
onClick={this.modalEffect.bind(this, index)}
className={this.state.zooom === index ? 'zooom blurb__item__img' : "blurb__item__img"}
>
<div className="video__btn"></div>
<div>
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
</div>
<div className="img___zoomed videoWrapper">
<div className="overlay" onClick={this.modalEffect.bind(this, index)}></div>
<iframe src={src}></iframe>
</div>
</div>
)
}
}
if(!!element.innerText){
return(
<div key={index} className="blurb__desciption">
<div>
<div className="close" onClick={ this.modalEffectOff.bind(this)}></div>
</div>
<div>
<ul className="nostyle">
<li className="description__title puce">{element.title}</li>
<li className="description__innerText" dangerouslySetInnerHTML={{__html: element.innerText}}></li>
</ul>
</div>
</div>
)
}
}) : null}
</div>
)}
): null}
</div>
</div>
)
}
}
) : null}
</div>
)
) : null
}
</div>
</div>
</div>
</div>
)
}
}
/*<div
className={this.state.modalopened ? "modalSlider opened": "modalSlider"}
onClick={this.modalEffectOff.bind(this)}
>
<div className="modal-header">
<button><span>×</span></button>
</div>
{!!item.domElements ? item.domElements.map((elements, index) =>{
if(elements.className === "item__captures"){
return(
<div key={index}>
<Swiper key={index} {...this.state.params}>
{!!elements.element ? elements.element.map((element, index)=> {
if(!!element.src){
let src = element.src
let legend = element.legend
let ext = src.substring(src.lastIndexOf('.') + 1)
let exts =['jpg', 'jpeg', 'png', 'svg']
if(exts.indexOf(ext) !== -1){
return(
<div key={index}
className={this.state.zooom === index ? "" : ""}
>
<div>
<img alt="" src={src}/>
<legend>{legend}</legend>
</div>
</div>
)
}
}
}) : null}
</Swiper>
</div>
)
}
}): null}
</div>*/
|
HStore
|
scroll.service.js
|
import { fromEvent, ReplaySubject } from 'rxjs';
import { delay, first, map, startWith, switchMap } from 'rxjs/operators';
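// NOTE: `gsap` and `Quad` are referenced in scrollTo() below but are not imported here;
// this file appears to assume they are available as globals (e.g. loaded via a <script> tag).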
export class
|
{
static scroll$ = new ReplaySubject(1);
static scroll(scroll) {
// console.log('ScrollService.scroll', scroll);
this.scroll$.next(scroll);
}
static init$(node) {
let previousY = window.pageYOffset;
const event = { direction: null, scroll: { x: 0, y: 0 }, speed: 0 };
return fromEvent(window, 'DOMContentLoaded').pipe(
// tap(_ => console.log('ScrollService.DOMContentLoaded')),
first(),
delay(1),
switchMap(_ => fromEvent(window, 'scroll')),
startWith(true),
// tap(_ => console.log('ScrollService.scroll')),
map(_ => {
/*
const body = document.querySelector('body');
let previousY = body.scrollTop;
body.addEventListener('scroll', () => {
const y = body.scrollTop;
const direction = y >= previousY ? 'down' : 'up';
if (Math.abs(y - previousY) > 90) {
// console.log('scroll', y, direction);
previousY = y;
event.direction = direction;
event.scroll.y = y;
ScrollService.scroll(event);
}
}, true);
*/
const y = window.pageYOffset;
const direction = y >= previousY ? 'down' : 'up';
// console.log(Math.abs(y - previousY) > 90);
// if (Math.abs(y - previousY) > 90) {
previousY = y;
event.direction = direction;
event.scroll.y = y;
ScrollService.scroll(event);
// }
return event;
}),
);
}
static scrollTo(target, options = { offset: -130 }) {
const body = document.querySelector('body');
const currentTop = body.scrollTop; // window.pageYOffset; // body.scrollTop;
const targetTop = currentTop + target.getBoundingClientRect().top + options.offset;
const distance = targetTop - currentTop;
const o = { pow: 0 };
gsap.set(body, {
'scroll-behavior': 'auto'
});
if (options.disableLerp) {
gsap.set(body, {
'scrollTop': currentTop + distance
});
gsap.set(body, {
'scroll-behavior': 'smooth'
});
} else {
gsap.to(o, {
duration: Math.abs(distance) / 2000,
pow: 1,
ease: Quad.easeOut,
overwrite: 'all',
onUpdate: () => {
gsap.set(body, {
'scrollTop': currentTop + distance * o.pow
});
// window.scrollTo(0, currentTop + distance * o.pow);
},
onComplete: () => {
gsap.set(body, {
'scroll-behavior': 'smooth'
});
}
});
}
// target.scrollIntoView();
}
static scrollToSelector(selector, options) {
const target = document.querySelector(selector);
if (target) {
ScrollService.scrollTo(target, options);
}
}
}
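// Illustrative usage (a sketch, not part of the original file):
//
//   ScrollService.init$(document.body).subscribe(event => {
//       console.log(event.direction, event.scroll.y);
//   });
//   ScrollService.scrollToSelector('#section-2', { offset: -130 });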
|
ScrollService
|
hetero_ftl_host.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import numpy as np
from arch.api.utils import log_utils
from federatedml.evaluation import Evaluation
from federatedml.ftl.data_util.common_data_util import overlapping_samples_converter, load_model_parameters, \
save_model_parameters, create_table, convert_instance_table_to_dict, convert_instance_table_to_array, \
add_random_mask_for_list_of_values, remove_random_mask_from_list_of_values
from federatedml.ftl.data_util.log_util import create_shape_msg
from federatedml.ftl.eggroll_computation.helper import decrypt_matrix
from federatedml.ftl.encrypted_ftl import EncryptedFTLHostModel
from federatedml.ftl.encryption.encryption import generate_encryption_key_pair, decrypt_scalar, decrypt_array
from federatedml.ftl.faster_encrypted_ftl import FasterEncryptedFTLHostModel
from federatedml.ftl.hetero_ftl.hetero_ftl_base import HeteroFTLParty
from federatedml.ftl.plain_ftl import PlainFTLHostModel
from federatedml.param.param import FTLModelParam
from federatedml.util import consts
from federatedml.util.transfer_variable import HeteroFTLTransferVariable
LOGGER = log_utils.getLogger()
class HeteroFTLHost(HeteroFTLParty):
def __init__(self, host: PlainFTLHostModel, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(HeteroFTLHost, self).__init__()
self.host_model = host
self.model_param = model_param
self.transfer_variable = transfer_variable
self.max_iter = model_param.max_iter
self.n_iter_ = 0
def prepare_data(self, host_data):
LOGGER.info("@ start host prepare data")
host_features_dict, _, host_sample_indexes = convert_instance_table_to_dict(host_data)
host_sample_indexes = np.array(host_sample_indexes)
self._do_remote(host_sample_indexes,
name=self.transfer_variable.host_sample_indexes.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_sample_indexes),
role=consts.GUEST,
idx=-1)
guest_sample_indexes = self._do_get(name=self.transfer_variable.guest_sample_indexes.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_sample_indexes),
idx=-1)[0]
host_features, overlap_indexes, _ = overlapping_samples_converter(host_features_dict, host_sample_indexes,
guest_sample_indexes)
return host_features, overlap_indexes
def classified(self, prob_table, threshold):
"""
Convert a probability table into a predicted class table.
"""
predict_table = prob_table.mapValues(lambda x: 1 if x > threshold else 0)
return predict_table
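    # Illustrative example (not in the original file): with threshold=0.5, a probability
    # table {id1: 0.8, id2: 0.3} maps to the class table {id1: 1, id2: 0}.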
def evaluate(self, labels, pred_prob, pred_labels, evaluate_param):
LOGGER.info("@ start host evaluate")
predict_res = None
if evaluate_param.classi_type == consts.BINARY:
predict_res = pred_prob
elif evaluate_param.classi_type == consts.MULTY:
predict_res = pred_labels
else:
LOGGER.warning("unknown classification type, return None as evaluation results")
eva = Evaluation(evaluate_param.classi_type)
eva_report = eva.report(labels, predict_res, evaluate_param.metrics, evaluate_param.thresholds,
evaluate_param.pos_label)
LOGGER.info("@ evaluation report:" + str(eva_report))
return eva_report
def predict(self, host_data, predict_param):
LOGGER.info("@ start host predict")
features, labels, instances_indexes = convert_instance_table_to_array(host_data)
host_x = np.squeeze(features)
LOGGER.debug("host_x: " + str(host_x.shape))
host_prob = self.host_model.predict(host_x)
self._do_remote(host_prob,
name=self.transfer_variable.host_prob.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_prob),
role=consts.GUEST, idx=-1)
pred_prob = self._do_get(name=self.transfer_variable.pred_prob.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.pred_prob),
idx=-1)[0]
pred_prob = np.squeeze(pred_prob)
LOGGER.debug("pred_prob: " + str(pred_prob.shape))
pred_prob_table = create_table(pred_prob, instances_indexes)
actual_label_table = create_table(labels, instances_indexes)
pred_label_table = self.classified(pred_prob_table, predict_param.threshold)
if predict_param.with_proba:
predict_result = actual_label_table.join(pred_prob_table, lambda label, prob: (label if label > 0 else 0, prob))
predict_result = predict_result.join(pred_label_table, lambda x, y: (x[0], x[1], y))
else:
predict_result = actual_label_table.join(pred_label_table, lambda a_label, p_label: (a_label, None, p_label))
return predict_result
def load_model(self, model_table_name, model_namespace):
LOGGER.info("@ load host model from name/ns" + ", " + str(model_table_name) + ", " + str(model_namespace))
model_parameters = load_model_parameters(model_table_name, model_namespace)
self.host_model.restore_model(model_parameters)
def save_model(self, model_table_name, model_namespace):
LOGGER.info("@ save host model to name/ns" + ", " + str(model_table_name) + ", " + str(model_namespace))
_ = save_model_parameters(self.host_model.get_model_parameters(), model_table_name, model_namespace)
class HeteroPlainFTLHost(HeteroFTLHost):
def __init__(self, host: PlainFTLHostModel, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(HeteroPlainFTLHost, self).__init__(host, model_param, transfer_variable)
def fit(self, host_data):
LOGGER.info("@ start host fit")
host_x, overlap_indexes = self.prepare_data(host_data)
LOGGER.debug("host_x: " + str(host_x.shape))
LOGGER.debug("overlap_indexes: " + str(len(overlap_indexes)))
self.host_model.set_batch(host_x, overlap_indexes)
while self.n_iter_ < self.max_iter:
host_comp = self.host_model.send_components()
self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),
role=consts.GUEST,
idx=-1)
guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_component_list, self.n_iter_),
idx=-1)[0]
self.host_model.receive_components(guest_comp)
is_stop = self._do_get(name=self.transfer_variable.is_stopped.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_stopped, self.n_iter_),
idx=-1)[0]
LOGGER.info("@ time: " + str(time.time()) + ", ep: " + str(self.n_iter_) + ", converged: " + str(is_stop))
self.n_iter_ += 1
if is_stop:
break
"""
Centralized encryption scheme with an arbiter in the loop for decryption.
"""
class HeteroEncryptFTLHost(HeteroFTLHost):
def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(HeteroEncryptFTLHost, self).__init__(host, model_param, transfer_variable)
self.host_model: EncryptedFTLHostModel = host
def _precompute(self):
pass
def fit(self, host_data):
LOGGER.info("@ start host fit")
# get public key from arbiter
public_key = self._do_get(name=self.transfer_variable.paillier_pubkey.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey),
idx=-1)[0]
host_x, overlap_indexes = self.prepare_data(host_data)
LOGGER.debug("host_x: " + str(host_x.shape))
LOGGER.debug("overlap_indexes: " + str(len(overlap_indexes)))
self.host_model.set_batch(host_x, overlap_indexes)
self.host_model.set_public_key(public_key)
start_time = time.time()
while self.n_iter_ < self.max_iter:
host_comp = self.host_model.send_components()
self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),
role=consts.GUEST,
idx=-1)
guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_component_list, self.n_iter_),
idx=-1)[0]
self.host_model.receive_components(guest_comp)
self._precompute()
encrypt_host_gradients = self.host_model.send_gradients()
self._do_remote(encrypt_host_gradients, name=self.transfer_variable.encrypt_host_gradient.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.encrypt_host_gradient, self.n_iter_),
role=consts.ARBITER,
idx=-1)
decrypt_host_gradients = self._do_get(name=self.transfer_variable.decrypt_host_gradient.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.decrypt_host_gradient, self.n_iter_),
idx=-1)[0]
self.host_model.receive_gradients(decrypt_host_gradients)
is_stop = self._do_get(name=self.transfer_variable.is_encrypted_ftl_stopped.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_encrypted_ftl_stopped, self.n_iter_),
idx=-1)[0]
LOGGER.info("@ time: " + str(time.time()) + ", ep: " + str(self.n_iter_) + ", converged: " + str(is_stop))
self.n_iter_ += 1
if is_stop:
break
end_time = time.time()
LOGGER.info("@ running time: " + str(end_time - start_time))
class FasterHeteroEncryptFTLHost(HeteroEncryptFTLHost):
def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(FasterHeteroEncryptFTLHost, self).__init__(host, model_param, transfer_variable)
self.host_model: FasterEncryptedFTLHostModel = host
def _precompute(self):
LOGGER
|
Decentralized encryption scheme without arbiter in the loop.
"""
class HeteroDecentralizedEncryptFTLHost(HeteroFTLHost):
def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(HeteroDecentralizedEncryptFTLHost, self).__init__(host, model_param, transfer_variable)
self.host_model: EncryptedFTLHostModel = host
self.public_key = None
self.private_key = None
self.guest_public_key = None
def _precompute(self):
pass
def prepare_encryption_key_pair(self):
LOGGER.info("@ start host prepare encryption key pair")
self.public_key, self.private_key = generate_encryption_key_pair()
# exchange public_key with guest
self._do_remote(self.public_key, name=self.transfer_variable.host_public_key.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_public_key,
self.n_iter_),
role=consts.GUEST,
idx=-1)
self.guest_public_key = self._do_get(name=self.transfer_variable.guest_public_key.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_public_key, self.n_iter_),
idx=-1)[0]
def fit(self, host_data):
LOGGER.info("@ start host fit")
self.prepare_encryption_key_pair()
host_x, overlap_indexes = self.prepare_data(host_data)
LOGGER.debug("host_x: " + str(host_x.shape))
LOGGER.debug("overlap_indexes: " + str(len(overlap_indexes)))
self.host_model.set_batch(host_x, overlap_indexes)
self.host_model.set_public_key(self.public_key)
self.host_model.set_guest_public_key(self.guest_public_key)
self.host_model.set_private_key(self.private_key)
start_time = time.time()
while self.n_iter_ < self.max_iter:
# Stage 1: compute and encrypt components (using host public key) required by guest to
# calculate gradients and loss.
LOGGER.debug("@ Stage 1: ")
host_comp = self.host_model.send_components()
LOGGER.debug("send enc host_comp: " + create_shape_msg(host_comp))
self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),
role=consts.GUEST,
idx=-1)
# Stage 2: receive guest components in encrypted form (encrypted by guest public key),
# and calculate host gradients in encrypted form (encrypted by guest public key),
# and send them to guest for decryption
LOGGER.debug("@ Stage 2: ")
guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_component_list, self.n_iter_),
idx=-1)[0]
LOGGER.debug("receive enc guest_comp: " + create_shape_msg(guest_comp))
self.host_model.receive_components(guest_comp)
self._precompute()
# calculate host gradients in encrypted form (encrypted by guest public key)
encrypt_host_gradients = self.host_model.send_gradients()
LOGGER.debug("send encrypt_guest_gradients: " + create_shape_msg(encrypt_host_gradients))
# add random mask to encrypt_host_gradients and send them to guest for decryption
masked_enc_host_gradients, gradients_masks = add_random_mask_for_list_of_values(encrypt_host_gradients)
LOGGER.debug("send masked_enc_host_gradients: " + create_shape_msg(masked_enc_host_gradients))
self._do_remote(masked_enc_host_gradients, name=self.transfer_variable.masked_enc_host_gradients.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_host_gradients, self.n_iter_),
role=consts.GUEST,
idx=-1)
# Stage 3: receive and then decrypt masked encrypted guest gradients and masked encrypted guest loss,
# and send them to guest
LOGGER.debug("@ Stage 3: ")
masked_enc_guest_gradients = self._do_get(name=self.transfer_variable.masked_enc_guest_gradients.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_guest_gradients, self.n_iter_),
idx=-1)[0]
masked_enc_guest_loss = self._do_get(name=self.transfer_variable.masked_enc_loss.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_loss, self.n_iter_),
idx=-1)[0]
masked_dec_guest_gradients = self.__decrypt_gradients(masked_enc_guest_gradients)
masked_dec_guest_loss = self.__decrypt_loss(masked_enc_guest_loss)
LOGGER.debug("send masked_dec_guest_gradients: " + create_shape_msg(masked_dec_guest_gradients))
self._do_remote(masked_dec_guest_gradients, name=self.transfer_variable.masked_dec_guest_gradients.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_dec_guest_gradients, self.n_iter_),
role=consts.GUEST,
idx=-1)
LOGGER.debug("send masked_dec_guest_loss: " + str(masked_dec_guest_loss))
self._do_remote(masked_dec_guest_loss, name=self.transfer_variable.masked_dec_loss.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_dec_loss, self.n_iter_),
role=consts.GUEST,
idx=-1)
# Stage 4: receive masked but decrypted host gradients from guest and remove mask,
# and update host model parameters using these gradients.
LOGGER.debug("@ Stage 4: ")
masked_dec_host_gradients = self._do_get(name=self.transfer_variable.masked_dec_host_gradients.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.masked_dec_host_gradients, self.n_iter_),
idx=-1)[0]
LOGGER.debug("receive masked_dec_host_gradients: " + create_shape_msg(masked_dec_host_gradients))
cleared_dec_host_gradients = remove_random_mask_from_list_of_values(masked_dec_host_gradients, gradients_masks)
# update host model parameters using these gradients.
self.host_model.receive_gradients(cleared_dec_host_gradients)
# Stage 5: determine whether training is terminated.
LOGGER.debug("@ Stage 5: ")
is_stop = self._do_get(name=self.transfer_variable.is_decentralized_enc_ftl_stopped.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_decentralized_enc_ftl_stopped, self.n_iter_),
idx=-1)[0]
LOGGER.info("@ time: " + str(time.time()) + ", ep: " + str(self.n_iter_) + ", converged: " + str(is_stop))
self.n_iter_ += 1
if is_stop:
break
end_time = time.time()
LOGGER.info("@ running time: " + str(end_time - start_time))
def __decrypt_gradients(self, encrypt_gradients):
return decrypt_matrix(self.private_key, encrypt_gradients[0]), decrypt_array(self.private_key, encrypt_gradients[1])
def __decrypt_loss(self, encrypt_loss):
return decrypt_scalar(self.private_key, encrypt_loss)
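# Illustrative sketch (not part of the FATE API): the masking used in the stages above is
# additive blinding. A hypothetical minimal version over plain numpy arrays (assuming
# `import numpy as np` and ciphertexts that support additive homomorphism) looks like this:
#
#     def _mask_values(values):
#         masks = [np.random.uniform(size=np.shape(v)) for v in values]
#         return [v + m for v, m in zip(values, masks)], masks
#
#     def _unmask_values(masked_values, masks):
#         return [v - m for v, m in zip(masked_values, masks)]
#
# add_random_mask_for_list_of_values / remove_random_mask_from_list_of_values play these
# roles here, so the decrypting party only ever sees blinded gradients and loss values.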
class FasterHeteroDecentralizedEncryptFTLHost(HeteroDecentralizedEncryptFTLHost):
def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(FasterHeteroDecentralizedEncryptFTLHost, self).__init__(host, model_param, transfer_variable)
self.host_model: FasterEncryptedFTLHostModel = host
def _precompute(self):
LOGGER.debug("@ start precompute")
host_precomputed_comp = self.host_model.send_precomputed_components()
self._do_remote(host_precomputed_comp, name=self.transfer_variable.host_precomputed_comp_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_precomputed_comp_list,
self.n_iter_),
role=consts.GUEST,
idx=-1)
guest_precomputed_comp = self._do_get(name=self.transfer_variable.guest_precomputed_comp_list.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_precomputed_comp_list, self.n_iter_),
idx=-1)[0]
self.host_model.receive_precomputed_components(guest_precomputed_comp)
class HostFactory(object):
@classmethod
def create(cls, ftl_model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable, ftl_local_model):
if ftl_model_param.is_encrypt:
if ftl_model_param.enc_ftl == "dct_enc_ftl":
# decentralized encrypted ftl host
LOGGER.debug("@ create decentralized encrypted ftl_host")
host_model = EncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = HeteroDecentralizedEncryptFTLHost(host_model, ftl_model_param, transfer_variable)
elif ftl_model_param.enc_ftl == "dct_enc_ftl2":
# decentralized encrypted faster ftl host
LOGGER.debug("@ create decentralized encrypted faster ftl_host")
host_model = FasterEncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = FasterHeteroDecentralizedEncryptFTLHost(host_model, ftl_model_param, transfer_variable)
elif ftl_model_param.enc_ftl == "enc_ftl2":
# encrypted faster ftl host
LOGGER.debug("@ create encrypted faster ftl_host")
host_model = FasterEncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = FasterHeteroEncryptFTLHost(host_model, ftl_model_param, transfer_variable)
else:
# encrypted ftl host
LOGGER.debug("@ create encrypted ftl_host")
host_model = EncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = HeteroEncryptFTLHost(host_model, ftl_model_param, transfer_variable)
else:
# plain ftl host
LOGGER.debug("@ create plain ftl_host")
host_model = PlainFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = HeteroPlainFTLHost(host_model, ftl_model_param, transfer_variable)
return host
|
.info("@ start host precompute")
host_precomputed_comp = self.host_model.send_precomputed_components()
self._do_remote(host_precomputed_comp, name=self.transfer_variable.host_precomputed_comp_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_precomputed_comp_list,
self.n_iter_),
role=consts.GUEST,
idx=-1)
guest_precomputed_comp = self._do_get(name=self.transfer_variable.guest_precomputed_comp_list.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_precomputed_comp_list, self.n_iter_),
idx=-1)[0]
self.host_model.receive_precomputed_components(guest_precomputed_comp)
"""
|
ui_scaler.rs
|
use crate::structure::Size;
use codegen::{Animation, LuaComponent};
use mlua::prelude::*;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum
|
{
Constant,
Stretch,
Fit,
Fill,
MatchWidth,
MatchHeight,
}
impl<'lua> FromLua<'lua> for UIScaleMode {
fn from_lua(value: LuaValue<'lua>, lua: &'lua Lua) -> LuaResult<Self> {
let str = String::from_lua(value, lua)?;
let str = str.as_str();
match str {
"constant" => Ok(UIScaleMode::Constant),
"stretch" => Ok(UIScaleMode::Stretch),
"fit" => Ok(UIScaleMode::Fit),
"fill" => Ok(UIScaleMode::Fill),
"match-width" => Ok(UIScaleMode::MatchWidth),
"match-height" => Ok(UIScaleMode::MatchHeight),
_ => Err(
format!("{:?} is invalid value for the type {}", str, "UIScaleMode",).to_lua_err(),
),
}
}
}
impl<'lua> ToLua<'lua> for UIScaleMode {
fn to_lua(self, lua: &'lua Lua) -> LuaResult<LuaValue<'lua>> {
Ok(LuaValue::String(lua.create_string(match self {
UIScaleMode::Constant => "constant",
UIScaleMode::Stretch => "stretch",
UIScaleMode::Fit => "fit",
UIScaleMode::Fill => "fill",
UIScaleMode::MatchWidth => "match-width",
UIScaleMode::MatchHeight => "match-height",
})?))
}
}
#[derive(Animation, LuaComponent)]
pub struct UIScaler {
pub mode: UIScaleMode,
pub reference_size: Size,
}
impl UIScaler {
pub fn new(mode: UIScaleMode, reference_size: Size) -> Self {
Self {
mode,
reference_size,
}
}
}
|
UIScaleMode
|
03 Spaces.py
|
print("second = hour * 60 * 60")
|
||
queue_submit.rs
|
// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::ptr;
use command_buffer::sys::UnsafeCommandBuffer;
use device::Queue;
use sync::Fence;
use sync::PipelineStages;
use sync::Semaphore;
use check_errors;
use vk;
use Error;
use OomError;
use SynchronizedVulkanObject;
use VulkanObject;
/// Prototype for a submission that executes command buffers.
// TODO: example here
#[derive(Debug)]
pub struct SubmitCommandBufferBuilder<'a> {
wait_semaphores: SmallVec<[vk::Semaphore; 16]>,
destination_stages: SmallVec<[vk::PipelineStageFlags; 8]>,
signal_semaphores: SmallVec<[vk::Semaphore; 16]>,
command_buffers: SmallVec<[vk::CommandBuffer; 4]>,
fence: vk::Fence,
marker: PhantomData<&'a ()>,
}
impl<'a> SubmitCommandBufferBuilder<'a> {
/// Builds a new empty `SubmitCommandBufferBuilder`.
#[inline]
pub fn new() -> SubmitCommandBufferBuilder<'a> {
SubmitCommandBufferBuilder {
wait_semaphores: SmallVec::new(),
destination_stages: SmallVec::new(),
signal_semaphores: SmallVec::new(),
command_buffers: SmallVec::new(),
fence: 0,
marker: PhantomData,
}
}
/// Returns true if this builder will signal a fence when submitted.
///
/// # Example
///
/// ```
/// use vulkano::command_buffer::submit::SubmitCommandBufferBuilder;
/// use vulkano::sync::Fence;
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
///
/// unsafe {
/// let fence = Fence::from_pool(device.clone()).unwrap();
///
/// let mut builder = SubmitCommandBufferBuilder::new();
/// assert!(!builder.has_fence());
/// builder.set_fence_signal(&fence);
/// assert!(builder.has_fence());
/// }
/// ```
#[inline]
pub fn has_fence(&self) -> bool {
self.fence != 0
}
/// Adds an operation that signals a fence after this submission ends.
///
/// # Example
///
/// ```
/// use std::time::Duration;
/// use vulkano::command_buffer::submit::SubmitCommandBufferBuilder;
/// use vulkano::sync::Fence;
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// # let queue: std::sync::Arc<vulkano::device::Queue> = return;
///
/// unsafe {
/// let fence = Fence::from_pool(device.clone()).unwrap();
///
/// let mut builder = SubmitCommandBufferBuilder::new();
/// builder.set_fence_signal(&fence);
///
/// builder.submit(&queue).unwrap();
///
/// // We must not destroy the fence before it is signaled.
/// fence.wait(Some(Duration::from_secs(5))).unwrap();
/// }
/// ```
///
/// # Safety
///
/// - The fence must not be signaled at the time when you call `submit()`.
///
/// - If you use the fence for multiple submissions, only one at a time must be executed by the
/// GPU. In other words, you must submit one, wait for the fence to be signaled, then reset
/// the fence, and then only submit the second.
///
/// - If you submit this builder, the fence must be kept alive until it is signaled by the GPU.
/// Destroying the fence earlier is an undefined behavior.
///
/// - The fence, command buffers, and semaphores must all belong to the same device.
///
#[inline]
pub unsafe fn set_fence_signal(&mut self, fence: &'a Fence) {
self.fence = fence.internal_object();
}
/// Adds a semaphore to be waited upon before the command buffers are executed.
///
/// Only the given `stages` of the command buffers added afterwards will wait upon
/// the semaphore. Other stages not included in `stages` can execute before waiting.
///
/// # Safety
///
/// - The stages must be supported by the device.
///
/// - If you submit this builder, the semaphore must be kept alive until you are guaranteed
/// that the GPU has at least started executing the command buffers.
///
/// - If you submit this builder, no other queue must be waiting on these semaphores. In other
/// words, each semaphore signal can only correspond to one semaphore wait.
///
/// - If you submit this builder, the semaphores must be signaled when the queue execution
/// reaches this submission, or there must be one or more submissions in queues that are
/// going to signal these semaphores. In other words, you must not block the queue with
/// semaphores that can't get signaled.
///
/// - The fence, command buffers, and semaphores must all belong to the same device.
///
#[inline]
pub unsafe fn add_wait_semaphore(&mut self, semaphore: &'a Semaphore, stages: PipelineStages) {
debug_assert!(stages.into_vulkan_bits() != 0);
// TODO: debug assert that the device supports the stages
self.wait_semaphores.push(semaphore.internal_object());
self.destination_stages.push(stages.into_vulkan_bits());
}
/// Adds a command buffer that is executed as part of this command.
///
/// The command buffers are submitted in the order in which they are added.
///
/// # Safety
///
/// - If you submit this builder, the command buffer must be kept alive until you are
/// guaranteed that the GPU has finished executing it.
///
/// - Any calls to vkCmdSetEvent, vkCmdResetEvent or vkCmdWaitEvents that have been recorded
/// into the command buffer must not reference any VkEvent that is referenced by any of
/// those commands that is pending execution on another queue.
/// TODO: rephrase ^ ?
///
/// - The fence, command buffers, and semaphores must all belong to the same device.
///
/// TODO: more here
///
#[inline]
pub unsafe fn add_command_buffer(&mut self, command_buffer: &'a UnsafeCommandBuffer) {
self.command_buffers.push(command_buffer.internal_object());
}
/// Returns the number of semaphores to signal.
///
/// In other words, this is the number of times `add_signal_semaphore` has been called.
#[inline]
pub fn num_signal_semaphores(&self) -> usize {
self.signal_semaphores.len()
}
/// Adds a semaphore that is going to be signaled at the end of the submission.
///
/// # Safety
///
/// - If you submit this builder, the semaphore must be kept alive until you are guaranteed
/// that the GPU has finished executing this submission.
///
/// - The semaphore must be in the unsignaled state when queue execution reaches this
/// submission.
///
/// - The fence, command buffers, and semaphores must all belong to the same device.
///
#[inline]
pub unsafe fn add_signal_semaphore(&mut self, semaphore: &'a Semaphore) {
self.signal_semaphores.push(semaphore.internal_object());
}
/// Submits the command buffer to the given queue.
///
/// > **Note**: This is an expensive operation, so you may want to merge as many builders as
/// > possible together and avoid submitting them one by one.
///
pub fn submit(self, queue: &Queue) -> Result<(), SubmitCommandBufferError> {
unsafe {
let vk = queue.device().pointers();
let queue = queue.internal_object_guard();
debug_assert_eq!(self.wait_semaphores.len(), self.destination_stages.len());
let batch = vk::SubmitInfo {
sType: vk::STRUCTURE_TYPE_SUBMIT_INFO,
pNext: ptr::null(),
waitSemaphoreCount: self.wait_semaphores.len() as u32,
pWaitSemaphores: self.wait_semaphores.as_ptr(),
pWaitDstStageMask: self.destination_stages.as_ptr(),
commandBufferCount: self.command_buffers.len() as u32,
pCommandBuffers: self.command_buffers.as_ptr(),
signalSemaphoreCount: self.signal_semaphores.len() as u32,
pSignalSemaphores: self.signal_semaphores.as_ptr(),
};
check_errors(vk.QueueSubmit(*queue, 1, &batch, self.fence))?;
Ok(())
}
}
/// Merges this builder with another builder.
///
/// # Panic
///
/// Panics if both builders have a fence already set.
// TODO: create multiple batches instead
pub fn merge(mut self, other: Self) -> Self {
assert!(
self.fence == 0 || other.fence == 0,
"Can't merge two queue submits that both have a fence"
);
self.wait_semaphores.extend(other.wait_semaphores);
self.destination_stages.extend(other.destination_stages); // TODO: meh? will be solved if we submit multiple batches
self.signal_semaphores.extend(other.signal_semaphores);
self.command_buffers.extend(other.command_buffers);
if self.fence == 0 {
self.fence = other.fence;
}
self
}
}
|
pub enum SubmitCommandBufferError {
/// Not enough memory.
OomError(OomError),
/// The connection to the device has been lost.
DeviceLost,
}
impl error::Error for SubmitCommandBufferError {
#[inline]
fn cause(&self) -> Option<&dyn error::Error> {
match *self {
SubmitCommandBufferError::OomError(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for SubmitCommandBufferError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
fmt,
"{}",
match *self {
SubmitCommandBufferError::OomError(_) => "not enough memory",
SubmitCommandBufferError::DeviceLost =>
"the connection to the device has been lost",
}
)
}
}
impl From<Error> for SubmitCommandBufferError {
#[inline]
fn from(err: Error) -> SubmitCommandBufferError {
match err {
err @ Error::OutOfHostMemory => SubmitCommandBufferError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => {
SubmitCommandBufferError::OomError(OomError::from(err))
}
Error::DeviceLost => SubmitCommandBufferError::DeviceLost,
_ => panic!("unexpected error: {:?}", err),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
use sync::Fence;
#[test]
fn empty_submit() {
let (device, queue) = gfx_dev_and_queue!();
let builder = SubmitCommandBufferBuilder::new();
builder.submit(&queue).unwrap();
}
#[test]
fn signal_fence() {
unsafe {
let (device, queue) = gfx_dev_and_queue!();
let fence = Fence::alloc(device.clone()).unwrap();
assert!(!fence.ready().unwrap());
let mut builder = SubmitCommandBufferBuilder::new();
builder.set_fence_signal(&fence);
builder.submit(&queue).unwrap();
fence.wait(Some(Duration::from_secs(5))).unwrap();
assert!(fence.ready().unwrap());
}
}
#[test]
fn has_fence() {
unsafe {
let (device, queue) = gfx_dev_and_queue!();
let fence = Fence::alloc(device.clone()).unwrap();
let mut builder = SubmitCommandBufferBuilder::new();
assert!(!builder.has_fence());
builder.set_fence_signal(&fence);
assert!(builder.has_fence());
}
}
#[test]
fn merge_both_have_fences() {
unsafe {
let (device, _) = gfx_dev_and_queue!();
let fence1 = Fence::alloc(device.clone()).unwrap();
let fence2 = Fence::alloc(device.clone()).unwrap();
let mut builder1 = SubmitCommandBufferBuilder::new();
builder1.set_fence_signal(&fence1);
let mut builder2 = SubmitCommandBufferBuilder::new();
builder2.set_fence_signal(&fence2);
assert_should_panic!("Can't merge two queue submits that both have a fence", {
let _ = builder1.merge(builder2);
});
}
}
}
|
/// Error that can happen when submitting the prototype.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
|
fsm.py
|
"""
Provides the FSM class, a rudimentary implementation of a Finite State Machine.
"""
from typing import Any, Callable, Dict, List, Optional, Tuple
import warnings
import sdl2
from .tools.common import to_snake_case
__author__ = 'Tiziano Bettio'
__license__ = 'MIT'
__version__ = '0.1'
__copyright__ = """Copyright (c) 2020 Tiziano Bettio
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
class FSM:
"""
    Rudimentary Finite State Machine to organize state changes. If both an
    `enter_[my_class_name]` and an `exit_[my_class_name]` method are provided in a
    subclass of FSM, it becomes a state that can be activated through the
:meth:`self.request` method. `[my_class_name]` is a snake_case
representation of the name of the subclass. It assumes PascalCase for class
names (i.e. `MyClass` -> `my_class`). Use
:func:`~foolysh.tools.common.to_snake_case` with your class name as
parameter to determine the proper state name.
When :meth:`FSM.request` is called the following actions are performed:
1. The `exit_` method from the current state gets called.
2. The `enter_` method from the requested state gets called.
.. note::
Only provide `enter_` / `exit_` methods for subclasses that should be
callable states.
"""
__states: Dict[str, Tuple[Callable, Callable]] = None
__active_state: Optional[str] = None
__history: List[str] = None
__fsm_data: Dict[str, Any] = None
def __setup_fsm(self):
mro = [i.__name__ for i in self.__class__.__mro__]
mro = mro[:mro.index('FSM')]
self.__states = {}
self.__history = []
self.__fsm_data = {}
self.__fsm_data['-global-'] = {}
for i in mro:
name = to_snake_case(i)
enterm = getattr(self, f'enter_{name}', False)
exitm = getattr(self, f'exit_{name}', False)
if enterm and exitm:
self.__states[name] = enterm, exitm
self.__fsm_data[name] = None
else:
                warnings.warn(f'Class "{i}" does not expose enter and exit methods. '
                              f'State not registered!')
def request(self, state_name: str, back: bool = False) -> None:
"""
Performs the transition to a registered State. Raises a ValueError if
the provided `state_name` is not registered.
"""
if not self.__states:
self.__setup_fsm()
if state_name not in self.__states:
raise ValueError(f'Unknown state "{state_name}".')
if self.__active_state == state_name:
return
if self.__active_state is not None:
self.__states[self.__active_state][1]()
if not back:
self.__history.append(self.__active_state)
self.__active_state = state_name
self.__states[state_name][0]()
sdl2.SDL_StopTextInput() # Ensure on-screen kbd gets hidden
def fsm_back(self) -> None:
"""
Performs the transition to the last known state in the history. Does
nothing if the history is empty.
"""
if not self.__history:
return
self.request(self.__history.pop(), True)
@property
|
@property
def previous_state(self) -> str:
"""The previous state before the last transition, or ``None``."""
if self.__history:
return self.__history[-1]
return None
@property
def fsm_data(self) -> Any:
"""The FSM data stored for the active state."""
if self.__active_state is None:
raise ValueError(f'No state set yet.')
if self.__fsm_data[self.__active_state] is None:
self.__fsm_data[self.__active_state] = {}
return self.__fsm_data[self.__active_state]
@property
def fsm_global_data(self) -> Dict[str, Any]:
"""
A data dict accessible from any state, potentially useful for passing
information between states.
"""
return self.__fsm_data['-global-']
|
def active_state(self) -> str:
"""The currently active state."""
return self.__active_state
|
mod.rs
|
//! Logic for working with attributes under a shared timestamp
//! semantics.
use std::collections::HashMap;
use timely::dataflow::operators::{Probe, UnorderedInput};
use timely::dataflow::{ProbeHandle, Scope, ScopeParent, Stream};
use timely::progress::frontier::AntichainRef;
use timely::progress::Timestamp;
use differential_dataflow::lattice::Lattice;
use differential_dataflow::operators::arrange::Arrange;
use differential_dataflow::operators::Threshold;
use differential_dataflow::trace::TraceReader;
use differential_dataflow::AsCollection;
use crate::operators::LastWriteWins;
use crate::{Aid, Error, Rewind, TxData, Value};
use crate::{AttributeConfig, IndexDirection, InputSemantics, QuerySupport};
use crate::{RelationConfig, RelationHandle};
use crate::{TraceKeyHandle, TraceValHandle};
mod unordered_session;
use unordered_session::UnorderedSession;
/// A domain manages attributes that share a timestamp semantics. Each
/// attribute within a domain can be either fed from an external
/// system, or from user transactions. The former are referred to as
/// *sourced*, the latter as *transactable* attributes.
///
/// Both types of input must make sure not to block overall domain
/// progress, s.t. results can be revealed and traces can be
/// compacted. For attributes with an opinion on time, users and
/// source operators are required to regularly downgrade their
/// capabilities. As they do so, the domain frontier advances.
///
/// Some attributes do not care about time. Such attributes want their
/// information to be immediately available to all
/// queries. Conceptually, they want all their inputs to happen at
/// t0. This is however not a practical solution, because holding
/// capabilities for t0 in perpetuity completely stalls monotemporal
/// domains and prevents trace compaction in multitemporal ones. We
/// refer to this type of attributes as *timeless*. Instead, timeless
/// attributes must be automatically advanced in lockstep with a
/// high-watermark of all timeful domain inputs. This ensures that
/// they will never block overall progress.
pub struct Domain<T: Timestamp + Lattice> {
/// The current input epoch.
now_at: T,
/// Last trace advance.
last_advance: Vec<T>,
/// Input handles to attributes in this domain.
input_sessions: HashMap<String, UnorderedSession<T, (Value, Value), isize>>,
/// The probe keeping track of source progress in this domain.
domain_probe: ProbeHandle<T>,
/// Maintaining the number of probed sources allows us to
/// distinguish between a domain without sources, and one where
/// sources have ceased producing inputs.
probed_source_count: usize,
/// Configurations for attributes in this domain.
pub attributes: HashMap<Aid, AttributeConfig>,
/// Forward count traces.
pub forward_count: HashMap<Aid, TraceKeyHandle<Value, T, isize>>,
/// Forward propose traces.
pub forward_propose: HashMap<Aid, TraceValHandle<Value, Value, T, isize>>,
/// Forward validate traces.
pub forward_validate: HashMap<Aid, TraceKeyHandle<(Value, Value), T, isize>>,
/// Reverse count traces.
pub reverse_count: HashMap<Aid, TraceKeyHandle<Value, T, isize>>,
/// Reverse propose traces.
pub reverse_propose: HashMap<Aid, TraceValHandle<Value, Value, T, isize>>,
/// Reverse validate traces.
pub reverse_validate: HashMap<Aid, TraceKeyHandle<(Value, Value), T, isize>>,
/// Configuration for relations in this domain.
pub relations: HashMap<Aid, RelationConfig>,
/// Relation traces.
pub arrangements: HashMap<Aid, RelationHandle<T>>,
}
impl<T> Domain<T>
where
T: Timestamp + Lattice + Rewind,
{
/// Creates a new domain.
pub fn new(start_at: T) -> Self {
Domain {
now_at: start_at,
last_advance: vec![<T as Lattice>::minimum()],
input_sessions: HashMap::new(),
domain_probe: ProbeHandle::new(),
probed_source_count: 0,
attributes: HashMap::new(),
forward_count: HashMap::new(),
forward_propose: HashMap::new(),
forward_validate: HashMap::new(),
reverse_count: HashMap::new(),
reverse_propose: HashMap::new(),
reverse_validate: HashMap::new(),
relations: HashMap::new(),
arrangements: HashMap::new(),
}
}
/// Creates an attribute from a stream of (key,value)
/// pairs. Applies operators to enforce input semantics, registers
/// the attribute configuration, and installs appropriate indices.
fn create_attribute<S: Scope + ScopeParent<Timestamp = T>>(
&mut self,
name: &str,
config: AttributeConfig,
pairs: &Stream<S, ((Value, Value), T, isize)>,
) -> Result<(), Error> {
if self.attributes.contains_key(name) {
Err(Error::conflict(format!(
"An attribute of name {} already exists.",
name
)))
} else {
let tuples = match config.input_semantics {
InputSemantics::Raw => pairs.as_collection(),
InputSemantics::LastWriteWins => pairs.as_collection().last_write_wins(),
// Ensure that redundant (e,v) pairs don't cause
// misleading proposals during joining.
InputSemantics::Distinct => pairs.as_collection().distinct(),
};
// @TODO should only create this if used later
let tuples_reverse = tuples.map(|(e, v)| (v, e));
// Propose traces are used in general, whereas the other
// indices are only relevant to Hector.
self.forward_propose.insert(
name.to_string(),
tuples.arrange_named(&format!("->Propose({})", &name)).trace,
);
if config.index_direction == IndexDirection::Both {
self.reverse_propose.insert(
name.to_string(),
tuples_reverse
.arrange_named(&format!("->_Propose({})", &name))
.trace,
);
}
// LastWriteWins is a special case, because count,
// propose, and validate are all essentially the same.
if config.input_semantics != InputSemantics::LastWriteWins {
// Count traces are only required for use in
// worst-case optimal joins.
if config.query_support == QuerySupport::AdaptiveWCO {
self.forward_count.insert(
name.to_string(),
tuples
.map(|(k, _v)| (k, ()))
.arrange_named(&format!("->Count({})", name))
.trace,
);
if config.index_direction == IndexDirection::Both {
self.reverse_count.insert(
name.to_string(),
tuples_reverse
.map(|(k, _v)| (k, ()))
.arrange_named(&format!("->_Count({})", name))
.trace,
);
}
}
if config.query_support >= QuerySupport::Delta {
self.forward_validate.insert(
name.to_string(),
tuples
.map(|t| (t, ()))
.arrange_named(&format!("->Validate({})", &name))
.trace,
);
if config.index_direction == IndexDirection::Both {
self.reverse_validate.insert(
name.to_string(),
tuples_reverse
.map(|t| (t, ()))
.arrange_named(&format!("->_Validate({})", &name))
.trace,
);
}
}
}
// This is crucial. If we forget to install the attribute
// configuration, its traces will be ignored when
// advancing the domain.
|
Ok(())
}
}
/// Creates an attribute that can be transacted upon by clients.
pub fn create_transactable_attribute<S: Scope<Timestamp = T>>(
&mut self,
name: &str,
config: AttributeConfig,
scope: &mut S,
) -> Result<(), Error> {
let pairs = {
let ((handle, cap), pairs) = scope.new_unordered_input::<((Value, Value), T, isize)>();
let session = UnorderedSession::from(handle, cap);
self.input_sessions.insert(name.to_string(), session);
pairs
};
// We do not want to probe transactable attributes, because
// the domain epoch is authoritative for them.
self.create_attribute(name, config, &pairs)?;
Ok(())
}
/// Creates an attribute that is controlled by a source and thus
/// can not be transacted upon by clients.
pub fn create_sourced_attribute<S: Scope + ScopeParent<Timestamp = T>>(
&mut self,
name: &str,
config: AttributeConfig,
pairs: &Stream<S, ((Value, Value), T, isize)>,
) -> Result<(), Error> {
// We need to install a probe on source-fed attributes in
// order to determine their progress.
// We do not want to probe timeless attributes.
// Sources of timeless attributes either are not able to or do not
// want to provide valid domain timestamps.
// Forcing to probe them would stall progress in the system.
let source_pairs = if config.timeless {
pairs.to_owned()
} else {
self.probed_source_count += 1;
pairs.probe_with(&mut self.domain_probe)
};
self.create_attribute(name, config, &source_pairs)?;
Ok(())
}
/// Inserts a new named relation.
pub fn register_arrangement(
&mut self,
name: String,
config: RelationConfig,
trace: RelationHandle<T>,
) {
self.relations.insert(name.clone(), config);
self.arrangements.insert(name, trace);
}
/// Transact data into one or more inputs.
pub fn transact(&mut self, tx_data: Vec<TxData>) -> Result<(), Error> {
// @TODO do this smarter, e.g. grouped by handle
for TxData(op, e, a, v, t) in tx_data {
match self.input_sessions.get_mut(&a) {
None => {
return Err(Error::not_found(format!("Attribute {} does not exist.", a)));
}
Some(handle) => match t {
None => handle.update((e, v), op),
Some(t) => handle.update_at((e, v), t.into(), op),
},
}
}
Ok(())
}
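    // Example (illustrative): a TxData(op, e, a, v, t) with t == None is applied at the
    // current domain epoch, while Some(t) targets that specific time; op is the isize
    // multiplicity (+1 to assert a datom, -1 to retract it).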
/// Closes and drops an existing input.
pub fn close_input(&mut self, name: String) -> Result<(), Error> {
match self.input_sessions.remove(&name) {
None => Err(Error::not_found(format!("Input {} does not exist.", name))),
Some(handle) => {
handle.close();
Ok(())
}
}
}
/// Advances the domain to the current domain frontier, thus
/// allowing traces to compact. All domain input handles are
/// forwarded up to the frontier, so as not to stall progress.
pub fn advance(&mut self) -> Result<(), Error> {
if self.probed_source_count() == 0 {
// No sources registered.
self.advance_traces(&[self.epoch().clone()])
} else {
let frontier = self
.domain_probe
.with_frontier(|frontier| (*frontier).to_vec());
if frontier.is_empty() {
// Even if all sources dropped their capabilities we
// still want to advance all traces to the current
// epoch, s.t. user created attributes are
// continuously advanced and compacted.
self.advance_traces(&[self.epoch().clone()])
} else {
if !AntichainRef::new(&frontier).less_equal(self.epoch()) {
// Input handles have fallen behind the sources and need
// to be advanced, such as not to block progress.
let max = frontier.iter().max().unwrap().clone();
self.advance_epoch(max)?;
}
self.advance_traces(&frontier)
}
}
}
/// Advances the domain epoch. The domain epoch can be in advance
/// of or lag behind the domain frontier. It is used by timeless
/// attributes to avoid stalling timeful inputs.
pub fn advance_epoch(&mut self, next: T) -> Result<(), Error> {
if !self.now_at.less_equal(&next) {
// We can't rewind time.
Err(Error::conflict(format!(
"Domain is at {:?}, you attempted to rewind to {:?}.",
&self.now_at, &next
)))
} else if !self.now_at.eq(&next) {
trace!("Advancing domain epoch to {:?} ", next);
for handle in self.input_sessions.values_mut() {
handle.advance_to(next.clone());
handle.flush();
}
self.now_at = next;
Ok(())
} else {
Ok(())
}
}
/// Advances domain traces up to the specified frontier minus
/// their configured slack.
pub fn advance_traces(&mut self, frontier: &[T]) -> Result<(), Error> {
let last_advance = AntichainRef::new(&self.last_advance);
if frontier.iter().any(|t| last_advance.less_than(t)) {
trace!("Advancing traces to {:?}", frontier);
self.last_advance = frontier.to_vec();
let frontier = AntichainRef::new(frontier);
for (aid, config) in self.attributes.iter() {
if let Some(ref trace_slack) = config.trace_slack {
let slacking_frontier = frontier
.iter()
.map(|t| t.rewind(trace_slack.clone().into()))
                        .collect::<Vec<T>>();
if let Some(trace) = self.forward_count.get_mut(aid) {
trace.advance_by(&slacking_frontier);
trace.distinguish_since(&slacking_frontier);
}
if let Some(trace) = self.forward_propose.get_mut(aid) {
trace.advance_by(&slacking_frontier);
trace.distinguish_since(&slacking_frontier);
}
if let Some(trace) = self.forward_validate.get_mut(aid) {
trace.advance_by(&slacking_frontier);
trace.distinguish_since(&slacking_frontier);
}
if let Some(trace) = self.reverse_count.get_mut(aid) {
trace.advance_by(&slacking_frontier);
trace.distinguish_since(&slacking_frontier);
}
if let Some(trace) = self.reverse_propose.get_mut(aid) {
trace.advance_by(&slacking_frontier);
trace.distinguish_since(&slacking_frontier);
}
if let Some(trace) = self.reverse_validate.get_mut(aid) {
trace.advance_by(&slacking_frontier);
trace.distinguish_since(&slacking_frontier);
}
}
}
for (name, config) in self.relations.iter() {
if let Some(ref trace_slack) = config.trace_slack {
let slacking_frontier = frontier
.iter()
.map(|t| t.rewind(trace_slack.clone().into()))
.collect::<Vec<T>>();
let trace = self.arrangements.get_mut(name).unwrap_or_else(|| {
panic!("Configuration available for unknown relation {}", name)
});
trace.advance_by(&slacking_frontier);
trace.distinguish_since(&slacking_frontier);
}
}
}
Ok(())
}
/// Returns a handle to the domain's input probe.
pub fn domain_probe(&self) -> &ProbeHandle<T> {
&self.domain_probe
}
/// Reports the current input epoch.
pub fn epoch(&self) -> &T {
&self.now_at
}
/// Reports the number of probed (timeful) sources in the domain.
pub fn probed_source_count(&self) -> usize {
self.probed_source_count
}
/// Returns true iff the frontier dominates all domain inputs.
pub fn dominates(&self, frontier: AntichainRef<T>) -> bool {
// We must distinguish the scenario where the internal domain
// has no sources from one where all its internal sources have
// dropped their capabilities. We do this by checking the
// probed_source_count of the domain.
if self.probed_source_count() == 0 {
frontier.less_than(self.epoch())
} else if frontier.is_empty() {
false
} else {
self.domain_probe().with_frontier(|domain_frontier| {
domain_frontier.iter().all(|t| frontier.less_than(t))
})
}
}
}
|
self.attributes.insert(name.to_string(), config);
info!("Created attribute {}", name);
|
tls.go
|
package webfmwk
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
)
type (
	// ITLSConfig is used to interface the TLS implementation.
ITLSConfig interface {
fmt.Stringer
// GetCert return the full path to the server certificate file.
GetCert() string
// GetKey return the full path to the server key file.
GetKey() string
// GetCa return the full path to the server ca cert file.
GetCa() string
// GetInsecure return true if the TLS Certificate shouldn't be checked.
GetInsecure() bool
	// Empty return true if the config is empty.
Empty() bool
}
	// TLSConfig contains the tls config passed by the config file.
	// It implements ITLSConfig.
TLSConfig struct {
		Cert     string `json:"cert" mapstructure:"cert"`
		Key      string `json:"key" mapstructure:"key"`
		Ca       string `json:"ca" mapstructure:"ca"`
		Insecure bool   `json:"insecure" mapstructure:"insecure"`
}
)
var (
	// DefaultCurve lists the supported TLS curves.
DefaultCurve = []tls.CurveID{
tls.CurveP256,
tls.X25519,
}
	// DefaultCipher lists the accepted cipher suites.
DefaultCipher = []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // HTTP/2-required AES_128_GCM_SHA256 cipher
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // ECDHE-RSA-AES256-GCM-SHA384
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // ECDHE-ECDSA-AES256-GCM-SHA384
tls.TLS_AES_128_GCM_SHA256, // 1.3 tls cipher
tls.TLS_AES_256_GCM_SHA384, // 1.3 tls cipher
tls.TLS_CHACHA20_POLY1305_SHA256, // 1.3 tls cipher
		/* unapproved? */
		tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // RSA-AES256-GCM-SHA384
}
)
// GetCert implements ITLSConfig.
func (config TLSConfig) GetCert() string {
return config.Cert
}
// GetKey implements ITLSConfig.
func (config TLSConfig) GetKey() string {
return config.Key
}
// GetCa implements ITLSConfig.
func (config TLSConfig) GetCa() string {
return config.Ca
}
// GetInsecure implements ITLSConfig.
func (config TLSConfig) GetInsecure() bool {
return config.Insecure
}
// Empty implements ITLSConfig.
func (config TLSConfig) Empty() bool {
return config.Cert == "" && config.Key == ""
}
// String implements the Stringer interface.
func (config TLSConfig) String() string {
if config.Empty() {
return ""
}
return fmt.Sprintf("\tcert:\t%q\n\tkey:\t%q\n\tca:\t%q,\n\tinsecure:\t%t\n",
config.Cert, config.Key, config.Ca, config.Insecure)
}
// StartTLS exposes the server on an HTTPS address.
func (s *Server) StartTLS(addr string, tlsStuffs ITLSConfig) {
s.internalHandler()
|
s.launcher.Start("https server "+addr, func() error {
return s.internalInit(addr).Serve(s.loadTLSListener(addr, tlsStuffs))
})
}
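// Example (illustrative only; paths and address are placeholders):
//
//	s.StartTLS(":8443", TLSConfig{
//		Cert: "/etc/ssl/server.crt",
//		Key:  "/etc/ssl/server.key",
//		Ca:   "/etc/ssl/ca.crt",
//	})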
func (s *Server) getTLSCfg(tlsCfg ITLSConfig) *tls.Config {
cert, err := tls.LoadX509KeyPair(tlsCfg.GetCert(), tlsCfg.GetKey())
if err != nil {
s.log.Fatalf("cannot load cert [%s] and key [%s]: %s",
tlsCfg.GetCert(), tlsCfg.GetKey(), err.Error())
}
/* #nosec */
return &tls.Config{
Certificates: []tls.Certificate{cert},
PreferServerCipherSuites: true,
CurvePreferences: DefaultCurve,
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS13,
CipherSuites: DefaultCipher,
}
}
// register ca cert pool and toggle cert requirement
func (s *Server) loadCa(cfg *tls.Config, tlsCfg ITLSConfig) *tls.Config {
if tlsCfg.GetInsecure() {
return cfg
}
caCertPEM, e := ioutil.ReadFile(tlsCfg.GetCa())
if e != nil {
s.log.Fatalf("cannot load ca cert pool %q: %s", tlsCfg.GetCa(), e.Error())
}
roots := x509.NewCertPool()
if !roots.AppendCertsFromPEM(caCertPEM) {
s.log.Fatalf("failed to parse root certificate")
}
// :smirk:
cfg.ClientCAs = roots
cfg.ClientAuth = tls.RequireAndVerifyClientCert
return cfg
}
func (s *Server) loadTLSListener(addr string, tlsCfg ITLSConfig) net.Listener {
cfg := s.loadCa(s.getTLSCfg(tlsCfg), tlsCfg)
	listener, err := net.Listen("tcp4", addr)
if err != nil {
s.log.Fatalf("cannot listen on %q: %s", addr, err.Error())
}
	return tls.NewListener(listener, cfg)
}
| |
timeout.rs
|
//! Convenience type for representing a timeout, either infinite or finite.
use std::time::Duration;
/// Convenience type for representing a timeout, either infinite or finite.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Timeout {
|
pub(crate) duration: Option<Duration>,
}
impl Timeout {
/// Creates an infinite timeout.
pub fn infinite() -> Self { Self { duration: None } }
    /// Creates an empty (zero) timeout, i.e. no waiting at all.
pub fn none() -> Self { Duration::default().into() }
/// Is this timeout infinite?
pub fn is_infinite(&self) -> bool { self.duration.is_none() }
/// This timeout's duration, if not infinite.
pub fn duration(&self) -> Option<Duration> { self.duration }
}
impl From<Duration> for Timeout {
fn from(duration: Duration) -> Self { Self { duration: Some(duration) } }
}
| |
test_mobilenet.py
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
|
videoUrl = "/sharefolder/sdc/sdc-data/ossdc-simulator-TheCrew-PS4-30fps.mp4"
webcam = False
#webcam = True
sct = None
ret = True
if webcam:
cap = WebcamVideoStream(videoUrl,(1280,720),30)
cap.start()
else:
cap = cv2.VideoCapture(videoUrl)
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# ## Object detection imports
# Here are the imports from the object detection module.
# In[3]:
from utils import label_map_util
from utils import visualization_utils as vis_util
# # Model preparation
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# In[4]:
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# ## Download Model
# In[5]:
'''
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
'''
# ## Load a (frozen) Tensorflow model into memory.
# In[6]:
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# In[7]:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# ## Helper code
# In[8]:
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# # Detection
# In[9]:
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# In[10]:
from datetime import datetime
precision = 10
def getCurrentClock():
#return time.clock()
return datetime.now()
frameCnt=0
prevFrameCnt=0
prevTime=getCurrentClock()
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
while True:
if sct is not None or webcam or cap.grab():
if sct is not None:
frame = numpy.asarray(sct.grab(mon))
else:
if webcam:
frame = cap.read()
else:
flag, frame = cap.retrieve()
if not flag:
continue
image_np = frame
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
frameCnt=frameCnt+1
nowMicro = getCurrentClock()
delta = (nowMicro-prevTime).total_seconds()
if delta>=1.0:
fpsValue = ((frameCnt-prevFrameCnt)/delta)
print("FPS = %3.2f, Frame = %6d" % (fpsValue, frameCnt))
prevFrameCnt=frameCnt
cv2.imshow('object detection', cv2.resize(image_np, (800,600)))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
|
from webcamvideostream import *
videoUrl = 1
|
image-uploader.component.ts
|
import { Component, OnInit, ChangeDetectorRef } from '@angular/core';
import { FormBuilder, Validators } from '@angular/forms';
import { HttpClientService } from '../../services/http-client.service';
@Component({
selector: 'image-uploader',
templateUrl: './image-uploader.component.html',
styleUrls: ['./image-uploader.component.scss']
})
export class
|
implements OnInit {
formGroup = this.fb.group({
img2D: [null, Validators.required],
comment2D: [''],
imgH: [null, Validators.required],
commentH: [''],
imgV: [null, Validators.required],
commentV: [''],
commentAll: ['']
});
constructor(
private fb: FormBuilder,
private cd: ChangeDetectorRef,
private httpClientService: HttpClientService
) {}
ngOnInit() {}
onFileChange(event) {
let reader = new FileReader();
if (event.target.files && event.target.files.length) {
const [file] = event.target.files;
reader.readAsDataURL(file);
reader.onload = () => {
console.log(event.target.id);
if (event.target.id === 'img2D') {
this.formGroup.patchValue({
img2D: reader.result
});
} else if (event.target.id === 'imgH') {
this.formGroup.patchValue({
imgH: reader.result
});
} else if (event.target.id === 'imgV') {
this.formGroup.patchValue({
imgV: reader.result
});
}
// need to run CD since file load runs outside of zone
this.cd.markForCheck();
};
}
}
onSubmit() {
this.httpClientService
.postPosition(this.formGroup.value)
.then(response => {
console.log(response);
})
.catch(error => console.log(error));
}
}
|
ImageUploaderComponent
|
v1alpha1_metrics.py
|
# coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v2.11.8
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1Metrics(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'prometheus': 'list[V1alpha1Prometheus]'
}
attribute_map = {
'prometheus': 'prometheus'
}
def __init__(self, prometheus=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1Metrics - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._prometheus = None
self.discriminator = None
self.prometheus = prometheus
@property
def prometheus(self):
"""Gets the prometheus of this V1alpha1Metrics. # noqa: E501
Prometheus is a list of prometheus metrics to be emitted # noqa: E501
:return: The prometheus of this V1alpha1Metrics. # noqa: E501
:rtype: list[V1alpha1Prometheus]
"""
return self._prometheus
@prometheus.setter
def prometheus(self, prometheus):
|
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1Metrics):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1Metrics):
return True
return self.to_dict() != other.to_dict()
|
"""Sets the prometheus of this V1alpha1Metrics.
Prometheus is a list of prometheus metrics to be emitted # noqa: E501
:param prometheus: The prometheus of this V1alpha1Metrics. # noqa: E501
:type: list[V1alpha1Prometheus]
"""
if self.local_vars_configuration.client_side_validation and prometheus is None: # noqa: E501
raise ValueError("Invalid value for `prometheus`, must not be `None`") # noqa: E501
self._prometheus = prometheus
|
filtered_factory.go
|
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filteredFactory
import (
context "context"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
externalversions "knative.dev/discovery/pkg/client/informers/externalversions"
client "knative.dev/discovery/pkg/client/injection/client"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterInformerFactory(withInformerFactory)
}
// Key is used as the key for associating information with a context.Context.
type Key struct {
Selector string
}
type LabelKey struct{}
func WithSelectors(ctx context.Context, selector ...string) context.Context {
return context.WithValue(ctx, LabelKey{}, selector)
}
func withInformerFactory(ctx context.Context) context.Context {
c := client.Get(ctx)
opts := []externalversions.SharedInformerOption{}
if injection.HasNamespaceScope(ctx) {
opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
}
untyped := ctx.Value(LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
for _, selector := range labelSelectors {
thisOpts := append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) {
l.LabelSelector = selector
}))
ctx = context.WithValue(ctx, Key{Selector: selector},
externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), thisOpts...))
}
return ctx
}
|
// Get extracts the InformerFactory from the context.
func Get(ctx context.Context, selector string) externalversions.SharedInformerFactory {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
logging.FromContext(ctx).Panicf(
"Unable to fetch knative.dev/discovery/pkg/client/informers/externalversions.SharedInformerFactory with selector %s from context.", selector)
}
return untyped.(externalversions.SharedInformerFactory)
}
| |
database_test.py
|
# from parent folder, run 'python -m xutil.tests.helpers_test'
import unittest, os
from path import Path
from unittest.mock import patch
from xutil.helpers import *
from xutil.database.base import get_conn
import docker # https://github.com/docker/docker-py
from collections import namedtuple
class BaseDBTest(unittest.TestCase):
timeout = 40
@classmethod
def setUpClass(cls):
cls.client = docker.from_env()
cls.container = cls.client.containers.run(**cls.cntnr_params)
log('-Waiting for {} to start...'.format(cls.db_name))
st = now()
os.environ['PROFILE_YAML'] = get_dir_path(
__file__) + '/../database/templates/profile.yaml'
cls.conn = None
while 1:
time.sleep(1)
try:
cls.conn = get_conn(cls.db_name, echo=False)
break
except Exception as E:
if tdelta_seconds(now(), st) > cls.timeout:
cls.container.kill()
raise E
@classmethod
def tearDownClass(cls):
cls.conn.connection.close()
cls.container.kill()
class TestPostGres(BaseDBTest):
db_name = 'PG_TEST'
cntnr_params = dict(
image="postgres:9.6",
detach=True,
remove=True,
ports={5432: 35432},
environment={
'POSTGRES_DB': "test_db",
'POSTGRES_USER': "user",
'POSTGRES_PASSWORD': "password",
},
)
def test_load_data(self):
table_name = 'public.test_table'
Row = namedtuple('Row', 'id name state time comment timestamp')
test_data = []
for i in range(51):
test_data += [
Row('a'+str(i), 'Fritz', 'Florida', epoch(), 'some\ncomment\nwith new line', now()),
Row('b'+str(i), 'James', 'California', epoch(),
'''comment\twith\n comma, 'tab' and "quotes" ''',
now())
]
field_types = dict(
id=('string', 0, 15),
name=('string', 0, 100),
state=('string', 0, 100),
time=('integer', 0, epoch()),
comment=('string', 0, 100),
timestamp=('timestamp', 0, 100),
)
self.conn.batch_size = 100
self.conn.create_table(table_name, field_types)
self.conn.execute('ALTER TABLE test_table ADD PRIMARY KEY (id)')
count = self.conn.insert(table_name, test_data)
self.assertEqual(count, len(test_data))
fields, rows = self.conn.execute(
'select * from ' + table_name, dtype='tuple')
self.assertEqual([tuple(r) for r in test_data], rows)
self.assertEqual(fields, list(Row._fields))
# Test analysis
data = self.conn.analyze_fields('field_chars', table_name)
self.assertEqual(fields, [r.field for r in data])
data = self.conn.analyze_fields('field_stat_deep', table_name)
self.assertEqual(fields, [r.field for r in data])
data = self.conn.analyze_fields('field_stat_deep', table_name)
self.assertEqual(fields, [r.field for r in data])
data = self.conn.analyze_fields('distro_field', table_name, fields=['state'])
self.assertEqual(2, len(data))
data = self.conn.analyze_fields('distro_field_date', table_name, fields=['timestamp'])
self.assertEqual(1, len(data))
# Test replace
test_data[0] = Row(test_data[0][0], 'Emma', 'Florida', epoch(), 'some\ncomment\nwith new line', now())
count = self.conn.replace('test_table', test_data, pk_fields=['id'])
fields, rows = self.conn.execute(
'select * from test_table', dtype='tuple')
self.assertEqual([tuple(r) for r in test_data], rows)
class TestOracle(BaseDBTest):
db_name = 'ORCL_TEST'
cntnr_params = dict(
image="flarco/oracle-xe-11g:v1",
detach=True,
remove=True,
ports={1521: 31521},
)
def
|
(self):
table_name = 'system.test_table'
Row = namedtuple('Row', 'id_ name_ state_ time_ comment_ timestamp_')
test_data = []
for i in range(51):
test_data += [
Row('a'+str(i), 'Fritz', 'Florida', epoch(), 'some\ncomment\nwith new line', now().replace(microsecond=0)),
Row('b'+str(i), 'James', 'California', epoch(),
'''comment\twith\n comma, 'tab' and "quotes" ''',
now().replace(microsecond=0))
]
field_types = dict(
id_=('string', 15, None),
name_=('string', 100, None),
state_=('string', 100, None),
time_=('integer', 15, None),
comment_=('string', 100, None),
timestamp_=('timestamp', None, None),
)
self.conn.batch_size = 100
self.conn.create_table(table_name, field_types)
count = self.conn.insert(table_name, test_data)
self.assertEqual(count, len(test_data))
fields, rows = self.conn.execute(
'select * from '+table_name, dtype='tuple')
self.assertEqual([tuple(r) for r in test_data], rows)
self.assertEqual(fields, list(Row._fields))
# Test analysis
data = self.conn.analyze_fields('field_chars', table_name)
self.assertEqual(fields, [r.field for r in data])
data = self.conn.analyze_fields('field_stat_deep', table_name)
self.assertEqual(fields, [r.field for r in data])
data = self.conn.analyze_fields('field_stat_deep', table_name)
self.assertEqual(fields, [r.field for r in data])
data = self.conn.analyze_fields('distro_field', table_name, fields=['state_'])
self.assertEqual(2, len(data))
data = self.conn.analyze_fields('distro_field_date', table_name, fields=['timestamp_'])
self.assertEqual(1, len(data))
if __name__ == '__main__':
unittest.main()
|
test_load_data
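The middle value above completes the split between the prefix's trailing "def " and the suffix's leading "(self):". A small sketch of stitching such a row back together; the dictionary keys are assumptions about how a loader exposes the columns, and the prefix/suffix strings are truncated for illustration.
# Sketch: reassemble a fill-in-the-middle row into complete source text.
# The keys below are assumed column names; prefix and suffix are truncated here.
row = {
    "file_name": "database_test.py",
    "prefix": "class TestOracle(BaseDBTest):\n    ...\n    def ",
    "middle": "test_load_data",
    "suffix": "(self):\n        table_name = 'system.test_table'\n        ...",
}
completed = row["prefix"] + row["middle"] + row["suffix"]
assert "def test_load_data(self):" in completed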
|
parse_target.go
|
package addrs
import (
"fmt"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// Target describes a targeted address with source location information.
type Target struct {
Subject Targetable
SourceRange tfdiags.SourceRange
}
// ParseTarget attempts to interpret the given traversal as a targetable
// address. The given traversal must be absolute, or this function will
// panic.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the Target value is invalid and
// must not be used.
func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) {
path, remain, diags := parseModuleInstancePrefix(traversal)
if diags.HasErrors() {
return nil, diags
}
rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange())
if len(remain) == 0 {
return &Target{
Subject: path,
SourceRange: rng,
}, diags
}
mode := ManagedResourceMode
if remain.RootName() == "data"
|
if len(remain) < 2 {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "Resource specification must include a resource type and name.",
Subject: remain.SourceRange().Ptr(),
})
return nil, diags
}
var typeName, name string
switch tt := remain[0].(type) {
case hcl.TraverseRoot:
typeName = tt.Name
case hcl.TraverseAttr:
typeName = tt.Name
default:
switch mode {
case ManagedResourceMode:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "A resource type name is required.",
Subject: remain[0].SourceRange().Ptr(),
})
case DataResourceMode:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "A data source name is required.",
Subject: remain[0].SourceRange().Ptr(),
})
default:
panic("unknown mode")
}
return nil, diags
}
switch tt := remain[1].(type) {
case hcl.TraverseAttr:
name = tt.Name
default:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "A resource name is required.",
Subject: remain[1].SourceRange().Ptr(),
})
return nil, diags
}
var subject Targetable
remain = remain[2:]
switch len(remain) {
case 0:
subject = path.Resource(mode, typeName, name)
case 1:
if tt, ok := remain[0].(hcl.TraverseIndex); ok {
key, err := ParseInstanceKey(tt.Key)
if err != nil {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: fmt.Sprintf("Invalid resource instance key: %s.", err),
Subject: remain[0].SourceRange().Ptr(),
})
return nil, diags
}
subject = path.ResourceInstance(mode, typeName, name, key)
} else {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "Resource instance key must be given in square brackets.",
Subject: remain[0].SourceRange().Ptr(),
})
return nil, diags
}
default:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "Unexpected extra operators after address.",
Subject: remain[1].SourceRange().Ptr(),
})
return nil, diags
}
return &Target{
Subject: subject,
SourceRange: rng,
}, diags
}
// ParseTargetStr is a helper wrapper around ParseTarget that takes a string
// and parses it with the HCL native syntax traversal parser before
// interpreting it.
//
// This should be used only in specialized situations since it will cause the
// created references to not have any meaningful source location information.
// If a target string is coming from a source that should be identified in
// error messages then the caller should instead parse it directly using a
// suitable function from the HCL API and pass the traversal itself to
// ParseTarget.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// the returned target may be nil or incomplete.
func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
diags = diags.Append(parseDiags)
if parseDiags.HasErrors() {
return nil, diags
}
target, targetDiags := ParseTarget(traversal)
diags = diags.Append(targetDiags)
return target, diags
}
// ParseAbsResource attempts to interpret the given traversal as an absolute
// resource address, using the same syntax as expected by ParseTarget.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the AbsResource value is invalid and
// must not be used.
func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) {
addr, diags := ParseTarget(traversal)
if diags.HasErrors() {
return AbsResource{}, diags
}
switch tt := addr.Subject.(type) {
case AbsResource:
return tt, diags
case AbsResourceInstance: // Catch likely user error with specialized message
// Assume that the last element of the traversal must be the index,
// since that's required for a valid resource instance address.
indexStep := traversal[len(traversal)-1]
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "A resource address is required. This instance key identifies a specific resource instance, which is not expected here.",
Subject: indexStep.SourceRange().Ptr(),
})
return AbsResource{}, diags
case ModuleInstance: // Catch likely user error with specialized message
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "A resource address is required here. The module path must be followed by a resource specification.",
Subject: traversal.SourceRange().Ptr(),
})
return AbsResource{}, diags
default: // Generic message for other address types
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "A resource address is required here.",
Subject: traversal.SourceRange().Ptr(),
})
return AbsResource{}, diags
}
}
// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a
// string and parses it with the HCL native syntax traversal parser before
// interpreting it.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// the returned address may be incomplete.
//
// Since this function has no context about the source of the given string,
// any returned diagnostics will not have meaningful source location
// information.
func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
diags = diags.Append(parseDiags)
if parseDiags.HasErrors() {
return AbsResource{}, diags
}
addr, addrDiags := ParseAbsResource(traversal)
diags = diags.Append(addrDiags)
return addr, diags
}
// ParseAbsResourceInstance attempts to interpret the given traversal as an
// absolute resource instance address, using the same syntax as expected by
// ParseTarget.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the AbsResource value is invalid and
// must not be used.
func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) {
addr, diags := ParseTarget(traversal)
if diags.HasErrors() {
return AbsResourceInstance{}, diags
}
switch tt := addr.Subject.(type) {
case AbsResource:
return tt.Instance(NoKey), diags
case AbsResourceInstance:
return tt, diags
case ModuleInstance: // Catch likely user error with specialized message
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "A resource instance address is required here. The module path must be followed by a resource instance specification.",
Subject: traversal.SourceRange().Ptr(),
})
return AbsResourceInstance{}, diags
default: // Generic message for other address types
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address",
Detail: "A resource address is required here.",
Subject: traversal.SourceRange().Ptr(),
})
return AbsResourceInstance{}, diags
}
}
// ParseAbsResourceInstanceStr is a helper wrapper around
// ParseAbsResourceInstance that takes a string and parses it with the HCL
// native syntax traversal parser before interpreting it.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// the returned address may be incomplete.
//
// Since this function has no context about the source of the given string,
// any returned diagnostics will not have meaningful source location
// information.
func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
diags = diags.Append(parseDiags)
if parseDiags.HasErrors() {
return AbsResourceInstance{}, diags
}
addr, addrDiags := ParseAbsResourceInstance(traversal)
diags = diags.Append(addrDiags)
return addr, diags
}
|
{
mode = DataResourceMode
remain = remain[1:]
}
|
Webserver.py
|
from flask import Flask, render_template, request
from dashboard_forms import Dashform
#import create_pickle as p_j
import json
import os
app = Flask(__name__)
app.secret_key = 'dash_flask_key'
creddir = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))), 'credentials/dash_id.json')
# creddir_2 = os.path.join(os.path.dirname(
# os.path.dirname(os.path.realpath(__file__))), 'credentials')
tempdir = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))), 'www/templates/dash_id_template.json')
def Convert(string):
li = list(string.split(","))
k = []
for i in li:
i = str(i).replace(' ', '')
k.append(i)
return k
def
|
(string):
string = string.replace("[", "")
string = string.replace("]", "")
string = string.replace("'", "")
string = string.replace(" ", "")
return string
def json_exists(file_name):
return os.path.exists(file_name)
def getinfo():
data = []
if json_exists(creddir):
with open(creddir, "r") as rdash_id:
data = json.load(rdash_id)
return data
else:
with open(tempdir, "r") as f1, open(creddir, "w+") as f2:
f2.write(f1.read())
f2.close()
with open(creddir, "r") as rdash_id:
data = json.load(rdash_id)
return data
def save_json(res):
with open(creddir, 'r') as f:
data = json.load(f)
data["Transit"]["T_URL"] = res["T_URL"]
data["Transit"]["T_API_KEY"] = res["T_API_KEY"]
data["Transit"]["Stops"] = Convert(res["Stops"])
data["Transit"]["T_BUS"] = res["T_BUS"]
data["Transit"]["T_BUS_TIME"] = res["T_BUS_TIME"]
data["Weather"]["W_URL"] = res["W_URL"]
data["Weather"]["UNITS"] = res["UNITS"]
data["Weather"]["W_API_KEY"] = res["W_API_KEY"]
data["Geolocation"]["G_URL"] = res["G_URL"]
data["Geolocation"]["G_API_KEY"] = res["G_API_KEY"]
data["Currency"]["C_URL_1"] = res["C_URL_1"]
data["Currency"]["C_API_KEY_1"] = res["C_API_KEY_1"]
data["Currency"]["C_URL_3"] = res["C_URL_3"]
data["Currency"]["C_URL_4"] = res["C_URL_4"]
data["Currency"]["CURR_CHECK"] = Convert(res["CURR_CHECK"])
data["Stocks"]["STOCK_W_URL"] = res["STOCK_W_URL"]
data["Stocks"]["STOCK_WE_URL"] = res["STOCK_WE_URL"]
data["Stocks"]["STOCK_API"] = res["STOCK_API"]
data["Stocks"]["STOCK_CHECK"] = Convert(res["STOCK_CHECK"])
data["Tasklist"]["gsheet_json"] = res["gsheet_json"]
data["Tasklist"]["sheetname"] = res["sheetname"]
data["G_Meetings"]["CREDENTIALS_FILE"] = res["CREDENTIALS_FILE"]
data["News"]["NEWS_URL"] = res["NEWS_URL"]
data["News"]["NEWS_API"] = res["NEWS_API"]
data["News"]["NEWS_SOURCES"] = str(res["NEWS_SOURCES"]).replace(' ', '')
data["System"]["waking_time"] = res["waking_time"]
data["System"]["sleeping_time"] = res["sleeping_time"]
data["System"]["mod_1_choice"] = res["mod_1_choice"]
data["System"]["mod_2_choice"] = res["mod_2_choice"]
data["System"]["mod_3_choice"] = res["mod_3_choice"]
data["System"]["mod_4_choice"] = res["mod_4_choice"]
data["System"]["refresh_time"] = res["refresh_time"]
data["System"]["awake"] = res["awake"]
os.remove(creddir)
with open(creddir, 'w+') as f:
json.dump(data, f, indent=4)
@ app.route('/', methods=['POST', 'GET'])
def login():
form = Dashform()
d_data = getinfo()
form.res_msg.label = ""
if request.method == 'POST':
form.res_msg.label = ""
if request.form['btn'] == 'Submit':
results = request.form
save_json(results)
form.res_msg.label = "Information saved successfully"
'''elif request.form['btn'] == 'Generate Pickle File':
results = request.form
p_j.get_calendar_service(results["CREDENTIALS_FILE"], creddir_2)
'''
d_data = getinfo()
form.T_URL.data = str(d_data["Transit"]["T_URL"])
form.T_API_KEY.data = str(d_data["Transit"]["T_API_KEY"])
form.Stops.data = formatting(str(d_data["Transit"]["Stops"]))
form.T_BUS.data = str(d_data["Transit"]["T_BUS"])
form.T_BUS_TIME.data = str(d_data["Transit"]["T_BUS_TIME"])
form.W_URL.data = str(d_data["Weather"]["W_URL"])
form.W_API_KEY.data = str(d_data["Weather"]["W_API_KEY"])
form.UNITS.data = str(d_data["Weather"]["UNITS"])
form.C_URL_1.data = str(d_data["Currency"]["C_URL_1"])
form.C_API_KEY_1.data = str(d_data["Currency"]["C_API_KEY_1"])
form.C_URL_3.data = str(d_data["Currency"]["C_URL_3"])
form.C_URL_4.data = str(d_data["Currency"]["C_URL_4"])
form.CURR_CHECK.data = formatting(str(d_data["Currency"]["CURR_CHECK"]))
form.STOCK_W_URL.data = str(d_data["Stocks"]["STOCK_W_URL"])
form.STOCK_WE_URL.data = str(d_data["Stocks"]["STOCK_WE_URL"])
form.STOCK_API.data = str(d_data["Stocks"]["STOCK_API"])
form.STOCK_CHECK.data = formatting(str(d_data["Stocks"]["STOCK_CHECK"]))
form.G_URL.data = str(d_data["Geolocation"]["G_URL"])
form.G_API_KEY.data = str(d_data["Geolocation"]["G_API_KEY"])
form.gsheet_json.data = str(d_data["Tasklist"]["gsheet_json"])
form.sheetname.data = str(d_data["Tasklist"]["sheetname"])
form.CREDENTIALS_FILE.data = str(d_data["G_Meetings"]["CREDENTIALS_FILE"])
form.NEWS_URL.data = str(d_data["News"]["NEWS_URL"])
form.NEWS_API.data = str(d_data["News"]["NEWS_API"])
form.NEWS_SOURCES.data = formatting(str(d_data["News"]["NEWS_SOURCES"]))
form.waking_time.data = str(d_data["System"]["waking_time"])
form.sleeping_time.data = str(d_data["System"]["sleeping_time"])
form.mod_1_choice.data = str(d_data["System"]["mod_1_choice"])
form.mod_2_choice.data = str(d_data["System"]["mod_2_choice"])
form.mod_3_choice.data = str(d_data["System"]["mod_3_choice"])
form.mod_4_choice.data = str(d_data["System"]["mod_4_choice"])
form.refresh_time.data = str(d_data["System"]["refresh_time"])
form.awake.data = str(d_data["System"]["awake"])
return render_template('Settings.html', form=form)
elif request.method == 'GET':
# populate the form on start
d_data = getinfo()
form.res_msg.label = ""
form.T_URL.data = str(d_data["Transit"]["T_URL"])
form.T_API_KEY.data = str(d_data["Transit"]["T_API_KEY"])
form.Stops.data = formatting(str(d_data["Transit"]["Stops"]))
form.T_BUS.data = str(d_data["Transit"]["T_BUS"])
form.T_BUS_TIME.data = str(d_data["Transit"]["T_BUS_TIME"])
form.W_URL.data = str(d_data["Weather"]["W_URL"])
form.W_API_KEY.data = str(d_data["Weather"]["W_API_KEY"])
form.UNITS.data = str(d_data["Weather"]["UNITS"])
form.C_URL_1.data = str(d_data["Currency"]["C_URL_1"])
form.C_API_KEY_1.data = str(d_data["Currency"]["C_API_KEY_1"])
form.C_URL_3.data = str(d_data["Currency"]["C_URL_3"])
form.C_URL_4.data = str(d_data["Currency"]["C_URL_4"])
form.CURR_CHECK.data = formatting(str(d_data["Currency"]["CURR_CHECK"]))
form.STOCK_W_URL.data = str(d_data["Stocks"]["STOCK_W_URL"])
form.STOCK_WE_URL.data = str(d_data["Stocks"]["STOCK_WE_URL"])
form.STOCK_API.data = str(d_data["Stocks"]["STOCK_API"])
form.STOCK_CHECK.data = formatting(str(d_data["Stocks"]["STOCK_CHECK"]))
form.G_URL.data = str(d_data["Geolocation"]["G_URL"])
form.G_API_KEY.data = str(d_data["Geolocation"]["G_API_KEY"])
form.gsheet_json.data = str(d_data["Tasklist"]["gsheet_json"])
form.sheetname.data = str(d_data["Tasklist"]["sheetname"])
form.CREDENTIALS_FILE.data = str(d_data["G_Meetings"]["CREDENTIALS_FILE"])
form.NEWS_URL.data = str(d_data["News"]["NEWS_URL"])
form.NEWS_API.data = str(d_data["News"]["NEWS_API"])
form.NEWS_SOURCES.data = formatting(str(d_data["News"]["NEWS_SOURCES"]))
form.waking_time.data = str(d_data["System"]["waking_time"])
form.sleeping_time.data = str(d_data["System"]["sleeping_time"])
form.mod_1_choice.data = str(d_data["System"]["mod_1_choice"])
form.mod_2_choice.data = str(d_data["System"]["mod_2_choice"])
form.mod_3_choice.data = str(d_data["System"]["mod_3_choice"])
form.mod_4_choice.data = str(d_data["System"]["mod_4_choice"])
form.refresh_time.data = str(d_data["System"]["refresh_time"])
form.awake.data = str(d_data["System"]["awake"])
return render_template('Settings.html', form=form)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@ app.route('/shutdown', methods=['GET'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
formatting
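The middle value above names the helper whose body begins at the suffix's "(string):" line. A quick round-trip of the two string helpers from this record, assuming Convert and formatting are in scope; the stop IDs are arbitrary example values.
# Round-trip sketch for the helpers defined in this record (example values only):
# Convert splits a comma-separated string into a list, and formatting renders a
# list's str() representation back into a compact comma-separated string.
stops = Convert("7659,7660")
print(stops)                   # ['7659', '7660']
print(formatting(str(stops)))  # 7659,7660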
|