repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
---|---|---|---|---|---|---|---|---|
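Each row below pairs repository metadata with a prefix/middle/suffix split of a single source file, which reads like a fill-in-the-middle layout. Under that assumption, a minimal sketch for reassembling one row into complete file text (all values are placeholders, not taken from the rows below):

# Reassemble one row of this table; the dict keys mirror the column names above.
row = {
    "repo_name": "example/repo",      # placeholder values, not real rows
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 42,
    "score": 0.0,
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
}
full_text = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], len(full_text))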
602p/orth | os/kernel/font/font.py | Python | lgpl-3.0 | 19,506 | 0.000103 |
#adapted from ColorWall by jesstesstest
_font = (
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("#
|
#######")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("........"),
("........"),
("........"),
("........"),
("........"),
("........"),
("........"),
("........")
),
(
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("........"),
("....#..."),
("........")
),
(
("...#.#.."),
("...#.#.."),
("........"),
("........"),
("........"),
("........"),
("........"),
("........")
),
(
("...#.#.."),
("...#.#.."),
(".#######"),
("...#.#.."),
(".#######"),
("...#.#.."),
("...#.#.."),
("........")
),
(
("....#..."),
("...####."),
("..#.#..."),
("...###.."),
("....#.#."),
("..####.."),
("....#..."),
("........")
),
(
("........"),
("..##..#."),
("..##.#.."),
("....#..."),
("...#.##."),
("..#..##."),
("........"),
("........")
),
(
("...##..."),
("..#.#..."),
("...#...."),
("..#.#..."),
(".#...##."),
(".#...#.."),
("..###.#."),
("........")
),
(
("....#..."),
("....#..."),
("........"),
("........"),
("........"),
("........"),
("........"),
("........")
),
(
(".....#.."),
("....#..."),
("...#...."),
("...#...."),
("...#...."),
("....#..."),
(".....#.."),
("........")
),
(
("...#...."),
("....#..."),
(".....#.."),
(".....#.."),
(".....#.."),
("....#..."),
("...#...."),
("........")
),
(
("....#..."),
(".#..#..#"),
("..#.#.#."),
("...###.."),
("..#.#.#."),
(".#..#..#"),
("....#..."),
("........")
),
(
("....#..."),
("....#..."),
("....#..."),
(".#######"),
("....#..."),
("....#..."),
("....#..."),
("........")
),
(
("........"),
("........"),
("........"),
("........"),
("....##.."),
("....##.."),
(".....#.."),
("....#...")
),
(
("........"),
("........"),
("........"),
(".#######"),
("........"),
("........"),
("........"),
("........")
),
(
("........"),
("........"),
("........"),
("........"),
("........"),
("....##.."),
("....##.."),
("........")
),
(
(".......#"),
("......#."),
(".....#.."),
("....#..."),
("...#...."),
("..#....."),
(".#......"),
("........")
),
(
("...###.."),
("..#...#."),
("..#...#."),
("..#.#.#."),
("..#...#."),
("..#...#."),
("...###.."),
("........")
),
(
(" "),
(" ..##..."),
(" ...#..."),
(" ...#..."),
(" ...#..."),
(" ...#..."),
(" ..###.."),
(" ")
),
(
("...###.."),
("..#...#."),
("......#."),
(".....#.."),
("....#..."),
("...#...."),
("..#####."),
("........")
),
(
("...###.."),
("..#...#."),
("......#."),
("....##.."),
("......#."),
("..#...#."),
("...###.."),
("........")
),
(
("....##.."),
("...#.#.."),
("..#..#.."),
("..#####."),
(".....#.."),
(".....#.."),
("....###."),
("........")
),
(
("..#####."),
("..#....."),
("..#....."),
("..####..")
rohitranjan1991/home-assistant | homeassistant/components/plex/media_player.py | Python | mit | 19,911 | 0.000904 |
"""Support to interface with the Plex API."""
from __future__ import annotations
from functools import wraps
import json
import logging
import plexapi.exceptions
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.network import is_internal_request
from .const import (
COMMON_PLAYERS,
CONF_SERVER_IDENTIFIER,
DISPATCHERS,
DOMAIN as PLEX_DOMAIN,
NAME_FORMAT,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEX_URI_SCHEME,
SERVERS,
TRANSIENT_DEVICE_MODELS,
)
from .media_browser import browse_media
_LOGGER = logging.getLogger(__name__)
def needs_session(func):
"""Ensure session is available for certain attributes."""
@wraps(func)
def get_session_attribute(self, *args):
if self.session is None:
return None
return func(self, *args)
return get_session_attribute
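# Usage sketch (hedged; "media_title" is illustrative, not quoted from this
# excerpt): the decorator is meant to guard session-backed properties, e.g.
#
# @property
# @needs_session
# def media_title(self):
#     return self.session.title
#
# so that a player with no active session returns None instead of raising.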
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Plex media_player from a config entry."""
server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
registry = await async_get_registry(hass)
@callback
def async_new_media_players(new_entities):
_async_add_entities(hass, registry, async_add_entities, server_id, new_entities)
unsub = async_dispatcher_connect(
hass, PLEX_NEW_MP_SIGNAL.format(server_id), async_new_media_players
)
hass.data[PLEX_DOMAIN][DISPATCHERS][server_id].append(unsub)
_LOGGER.debug("New entity listener created")
@callback
def _async_add_entities(hass, registry, async_add_entities, server_id, new_entities):
"""Set up Plex media_player entities."""
_LOGGER.debug("New entities: %s", new_entities)
entities = []
plexserver = hass.data[PLEX_DOMAIN][SERVERS][server_id]
for entity_params in new_entities:
plex_mp = PlexMediaPlayer(plexserver, **entity_params)
entities.append(plex_mp)
# Migration to per-server unique_ids
old_entity_id = registry.async_get_entity_id(
MP_DOMAIN, PLEX_DOMAIN, plex_mp.machine_identifier
)
if old_entity_id is not None:
new_unique_id = f"{server_id}:{plex_mp.machine_identifier}"
_LOGGER.debug(
"Migrating unique_id from [%s] to [%s]",
plex_mp.machine_identifier,
new_unique_id,
)
registry.async_update_entity(old_entity_id, new_unique_id=new_unique_id)
async_add_entities(entities, True)
class PlexMediaPlayer(MediaPlayerEntity):
"""Representation of a Plex device."""
def __init__(self, plex_server, device, player_source, session=None):
"""Initialize the Plex device."""
self.plex_server = plex_server
self.device = device
self.player_source = player_source
self.device_make = None
self.device_platform = None
self.device_product = None
self.device_title = None
self.device_version = None
self.machine_identifier = device.machineIdentifier
self.session_device = None
self._device_protocol_capabilities = None
self._previous_volume_level = 1 # Used in fake muting
self._volume_level = 1 # since we can't retrieve remotely
self._volume_muted = False # since we can't retrieve remotely
self._attr_available = False
self._attr_should_poll = False
self._attr_state = STATE_IDLE
self._attr_unique_id = (
f"{self.plex_server.machine_identifier}:{self.machine_identifier}"
)
# Initializes other attributes
self.session = session
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
_LOGGER.debug("Added %s [%s]", self.entity_id, self.unique_id)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(self.unique_id),
self.async_refresh_media_player,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL.format(self.unique_id),
self.async_update_from_websocket,
)
)
@callback
def async_refresh_media_player(self, device, session, source):
"""Set instance objects and trigger an entity state update."""
_LOGGER.debug("Refreshing %s [%s / %s]", self.entity_id, device, session)
self.device = device
self.session = session
if source:
self.player_source = source
self.async_schedule_update_ha_state(True)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
@callback
def async_update_from_websocket(self, state):
"""Update the entity based on new websocket data."""
self.update_state(state)
self.async_write_ha_state()
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
def update(self):
"""Refresh key device data."""
if not self.session:
self.force_idle()
if not self.device:
self._attr_available = False
return
self._attr_available = True
try:
device_url = self.device.url("/")
except plexapi.exceptions.BadRequest:
device_url = "127.0.0.1"
if "127.0.0.1" in device_url:
self.device.proxyThroughServer()
self._device_protocol_capabilities = self.device.protocolCapabilities
for device in filter(None, [self.device, self.session_device]):
self.device_make = self.device_make or device.device
self.device_platform = self.device_platform or device.platform
self.device_product = self.device_product or device.product
self.device_title = self.device_title or device.title
self.device_version = self.device_version or device.version
name_parts = [self.device_product, self.device_title or self.device_platform]
if (self.device_product in COMMON_PLAYERS) and self.device_make:
# Add more context in name for likely duplicates
name_parts.append(self.device_make)
if self.username and self.username != self.plex_server.owner:
# Prepend username for shared/managed clients
name_parts.insert(0, self.username)
self._attr_name = NAME_FORMAT.format(" - ".join(name_parts))
def force_idle(self):
"""Force client to idle."""
self._attr_state = STATE_IDLE
if self.player_source == "session":
self.device = None
self.session_device = None
self._attr_available = False
@property
def session(self):
"""Return the active session for this player."""
return self._session
hkariti/ansible | lib/ansible/modules/network/enos/enos_config.py | Python | gpl-3.0 | 9,930 | 0.000604 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Module to configure Lenovo Switches.
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: enos_config
version_added: "2.5"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage Lenovo ENOS configuration sections
description:
- Lenovo ENOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ENOS configuration sections in
a deterministic way.
extends_documentation_fragment: enos
notes:
- Tested against ENOS 8.4.1.2
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines), I(parents).
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block', 'config']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
required: false
default: 'configured by enos_config'
admin:
description:
- Enters into administration configuration mode for making config
changes to the device.
required: false
default: false
choices: [ "yes", "no" ]
"""
EXAMPLES = """
- name: configure top level configuration
enos_config:
"lines: hostname {{ inventory_hostname }}"
- name: configure interface settings
enos_config:
lines:
- enable
- ip ospf enable
parents: interface ip 13
- name: load a config from disk and replace the current config
enos_config:
src: config.cfg
backup: yes
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/enos01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.enos.enos import load_config, get_config
from ansible.module_utils.network.enos.enos import enos_argument_spec
from ansible.module_utils.network.enos.enos import check_args
from ansible.module_utils.network.common.config import NetworkConfig, dumps
DEFAULT_COMMIT_COMMENT = 'configured by enos_config'
def get_running_config(module):
contents = module.params['config']
if not contents:
contents = get_config(module)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
replace_config = replace == 'config'
path = module.params['parents']
comment = module.params['comment']
admin = module.params['admin']
check_mode = module.check_mode
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
contents = get_running_config(module)
configobj = NetworkConfig(contents=contents, indent=1)
commands = candidate.difference(configobj, path=path, match=match,
replace=replace)
else:
commands = candidate.items
if commands:
commands = dumps(commands, 'commands').split('\n')
if any((module.params['lines'], module.params['src'])):
if module.params['before']:
commands[:0] = module.params['before']
if m
idosekely/python-lessons | lesson_1/variables.py | Python | mit | 403 | 0 |
__author__ = 'sekely'
'''
we are using variables almost everywhere in the code.
variables are used to store results, calculations and many more.
think of it as the famous "x" from high school
x = 5, right?
the only thing is, that in Python "x" can store anything
'''
# try this code:
x = 5
y = x + 3
print(y)
# what about this? will it work?
x = 'hello'
y = ' '
z = 'world!'
w = x + y + z
print(w)
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/topology_parameters.py | Python | mit | 1,697 | 0.001768 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyParameters(Model):
"""Parameters that define the representation of topology.
:param target_resource_group_name: The name of the target resource group
to perform topology on.
:type target_resource_group_name: str
:param target_virtual_network: The reference of the Virtual Network
resource.
:type target_virtual_network:
~azure.mgmt.network.v2017_11_01.models.SubResource
:param target_subnet: The reference of the Subnet resource.
:type target_subnet: ~azure.mgmt.network.v2017_11_01.models.SubResource
"""
_attribute_map = {
'target_resource_group_name': {'key': 'targetResourceGroupName', 'type': 'str'},
'target_virtual_network': {'key': 'targetVirtualNetwork', 'type': 'SubResource'},
'target_subnet': {'key': 'targetSubnet', 'type': 'SubResource'},
}
def __init__(self, target_resource_group_name=None, target_virtual_network=None, target_subnet=None):
super(TopologyParameters, self).__init__()
self.target_resource_group_name = target_resource_group_name
self.target_virtual_network = target_virtual_network
self.target_subnet = target_subnet
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_usages_operations.py | Python | mit | 5,274 | 0.004361 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations(object):
"""UsagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UsagesListResult"]
"""List network usages for a subscription.
:param location: The location where resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsagesListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.UsagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._ ]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UsagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'} # type: ignore
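# Call sketch (hedged; the client variable name and location value are
# assumptions): iterating the ItemPaged returned by this method yields the
# deserialized usage items across pages, e.g.
#
# for usage in network_client.usages.list(location="westus"):
#     print(usage)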
ostrokach/biskit | Biskit/Model.py | Python | gpl-3.0 | 10,389 | 0.02108 |
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
## last $Author$
## last $Date$
## $Revision$
"""Work in progress"""
import copy
import numpy as N
import Biskit as B
import Biskit.tools as T
import Biskit.molUtils as MU
def index2map( index, len_i ):
"""
For example
index2map([3,5,10], 12) ==> [0,0,0, 1,1, 2,2,2,2,2, 3,3,3]
@param index: list of starting positions, e.g. [0, 3, 8]
@type index: [ int ] or N.array of int
@param len_i: length of target map, e.g. 10
@type len_i: int
@return: list mapping atom positions to residue(/chain) number,
e.g. [0,0,0, 1,1,1,1,1, 2,2] from above example
@rtype: N.array of int (and of len_i length)
"""
index = N.concatenate( (index, [len_i]) )
delta = index[1:] - index[:-1]
return N.repeat( range(len(delta)), delta)
def map2index( imap ):
"""
Identify the starting positions of each residue(/chain) from a map
giving the residue(/chain) number of each atom.
@param imap: something like [0,0,0,1,1,1,1,1,2,2,2,...]
@type imap: [ int ]
@return: list of starting positions, e.g. [0, 3, 8, ...] in above ex.
@rtype: N.array of int
"""
try:
imap = N.concatenate( (imap, [imap[-1]] ) )
delta = imap[1:] - imap[:-1]
r = N.flatnonzero( delta ) + 1
return N.concatenate( ( [0], r ) )
except IndexError:
## handle empty imap parameter
return N.zeros(0)
class Model( object ):
"""
Model is intended to become the common base class for PDBModel and
Polymer.
"""
#: [str], default profiles for atoms
ATOM_KEYS = []
#: [str], default profiles for residues
RESIDUE_KEYS = []
#: [str], default profiles for chains
CHAIN_KEYS = []
def __init__(self):
"""
Create a new empty Model instance. Don't use this constructor directly.
"""
#: starting (atom) position of each residue
self._resIndex = None
#: starting position of each chain
self._chainIndex = None
#: values associated with atoms
self.atoms = B.ProfileCollection()
#: values associated with residues
self.residues = B.ProfileCollection()
#: values associated with chains or molecules
self.chains = B.ProfileCollection()
for key in self.ATOM_KEYS:
self.atoms.set( key, [], asarray=False )
for key in self.RESIDUE_KEYS:
self.residues.set( key, [], asarray=False )
for key in self.CHAIN_KEYS:
self.chains.set( key, [], asarray=False )
#: Meta info
self.info = { 'date':T.dateSortString() }
self.__version__ = self.version()
def version( self ):
return "Model $Revision$"
def __len__( self ):
return self.lenAtoms()
def __getitem__( self, k ):
"""
Get atom profile or profile item or CrossView for one atom::
m['prof1'] <==> m.atoms.get( 'prof1' )
m['prof1','info1'] <==> m.atoms.get( 'prof1','info1' )
m[10] <==> CrossView( m.atoms, 10 )
@return: profile OR meta infos thereof OR CrossView dict
@rtype: list OR array OR any OR CrossView
"""
if type( k ) is str:
if k in self.atoms:
return self.atoms.get( k )
if k in self.residues:
return self.residues.get( k )
if k in self.chains:
return self.chains.get( k )
if k in self.info:
return self.info[ k ]
if type( k ) is tuple:
return self.profileInfo( k[0] )[ k[1] ]
return self.atoms[k]
def __setitem__( self, k, v ):
"""
Set atom profile or profile item (or meta info)::
m['prof1'] = range(10) <==> m.atoms.set( 'prof1', range(10) )
OR <==> m.residues.set( 'prof1', range(10) )
m['prof1','info1]='comment'
<==> m.atoms.setInfo('prof1',info1='comment')
OR <==> m.residues.setInfo('prof1',info1='comment')
m['version'] = '1.0.0' <==> m.info['version'] = '1.0.0'
but only if 'version' already exists in m.info
@return: item
@rtype: any
"""
if type( k ) is str:
if v is not None and len( v ) == self.lenAtoms():
return self.atoms.set( k, v )
if v is not None and len( v ) == self.lenResidues():
return self.residues.set( k, v )
if v is not None and len( v ) == self.lenChains():
return self.chains.set( k, v )
if k in self.atoms:
return self.atoms.set( k, v )
if k in self.residues:
return self.residues.set( k, v )
if k in self.chains:
return self.chains.set( k, v )
if k in self.info:
self.info[ k ] = v
raise ProfileError, \
'Value cannot clearly be assigned to either atom or '+\
'residue or chain profiles'
if type( k ) is tuple:
key, infokey = k
if key in self.atoms:
self.atoms[key, infokey] = v
return
if key in self.residues:
self.residues[key, infokey] = v
return
self.chains[key, infokey] = v
return
raise ProfileError, \
'Cannot interpret %r as profile name or profile info record' % k
def __getslice__( self, *arg ):
"""
Get list of CrossViews::
m[0:100:5] <==> [ CrossView(m.atoms, i) for i in range(0,100,5) ]
"""
return self.atoms.__getslice__( *arg )
def __iter__( self ):
return self.atoms.iterCrossViews()
def _concatHook( self, resultModel, nextModel ):
pass
def concat( self, *models ):
"""
Concatenate the given models (in given order) to this model.
Note for developers: concat is called recursively on the growing model.
The typical pattern for overriding this method is hence a bit
different. See _concatHook()!
@param models: models to concatenate
@type models: Model, Model, ...
@return: resulting model
@rtype: Model or subclass thereof
"""
if len( models ) == 0:
return self
m = models[0]
r = self.__class__()
r.residues = self.residues.concat( m.residues, )
r.atoms = self.atoms.concat( m.atoms )
r._resIndex = N.concatenate(
(self._resIndex, m._resIndex + self.lenAtoms()))
r._chainIndex =N.concatenate(
(self._chainIndex, m._chainIndex +self.lenAtoms()))
r.info = copy.deepcopy( self.info )
self._concatHook( r, m )
return r.concat( *models[1:] )
def lenAtoms(self):
"""
@return: number of atoms in this model
@rtype: int
"""
return self.atoms.profLength()
def lenResidues( self ):
"""
@return: number of residues in this model
develru/RobotControlCenterKivy | main.py | Python | gpl-3.0 | 210 | 0 |
from kivy.app import App
from kivy.uix.button import Button
class RobotControlApp(App):
def build(self):
return Button(text='Hello World')
if __name__ == '__main__':
RobotControlApp().run()
nobodyinperson/python3-numericalmodel | tests/test_data.py | Python | gpl-3.0 | 794 | 0.011335 |
#!/usr/bin/env python3
# internal modules
import numericalmodel
# external modules
import numpy as np
EMPTY_ARRAY = np.array([])
class LinearDecayEquation(numericalmodel.equations.PrognosticEquation):
"""
Class for the linear decay equation
"""
def linear_factor(self, time = None ):
# take the "a" parameter from the input, interpolate it to the given
# "time" and return the negative value
return - self.input["a"](time)
def independent_addend(self, time = None ):
# take the "F" forcing parameter from the input, interpolate it to
# the given "time" and return it
return self.input["F"](time)
def nonlinear_addend(self, *args, **kwargs):
return 0 # nonlinear addend is always zero (LINEAR decay equation)
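# Taken together, the three methods above define the prognostic tendency
# (assuming the usual numericalmodel decomposition
# d(variable)/dt = linear_factor * variable + independent_addend + nonlinear_addend):
# here that reduces to the linear decay equation d(variable)/dt = -a * variable + F.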
USStateDept/geonode | geonode/groups/models.py | Python | gpl-3.0 | 9,016 | 0.001331 |
import datetime
import hashlib
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth import get_user_model
from django.db import models, IntegrityError
from django.utils.translation import ugettext_lazy as _
from django.db.models import signals
from taggit.managers import TaggableManager
from guardian.shortcuts import get_objects_for_group
class GroupProfile(models.Model):
GROUP_CHOICES = [
("public", _("Public")),
("public-invite", _("Public (invite-only)")),
("private", _("Private")),
]
access_help_text = _('Public: Any registered user can view and join a public group.<br>'
'Public (invite-only):Any registered user can view the group. '
'Only invited users can join.<br>'
'Private: Registered users cannot see any details about the group, including membership. '
'Only invited users can join.')
email_help_text = _('Email used to contact one or all group members, '
'such as a mailing list, shared email, or exchange group.')
group = models.OneToOneField(Group)
title = models.CharField(max_length=50)
slug = models.SlugField(unique=True)
logo = models.ImageField(upload_to="people_group", blank=True)
description = models.TextField()
email = models.EmailField(
_('email'),
null=True,
blank=True,
help_text=email_help_text)
keywords = TaggableManager(
_('keywords'),
help_text=_("A space or comma-separated list of keywords"),
blank=True)
access = models.CharField(
max_length=15,
default="public'",
choices=GROUP_CHOICES,
help_text=access_help_text)
last_modified = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
group, created = Group.objects.get_or_create(name=self.slug)
self.group = group
super(GroupProfile, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
Group.objects.filter(name=self.slug).delete()
super(GroupProfile, self).delete(*args, **kwargs)
@classmethod
def groups_for_user(cls, user):
"""
Returns the groups that user is a member of. If the user is a superuser, all groups are returned.
"""
if user.is_authenticated():
if user.is_superuser:
return cls.objects.all()
return cls.objects.filter(groupmember__user=user)
return []
def __unicode__(self):
return self.title
def keyword_list(self):
"""
Returns a list of the Group's keywords.
"""
return [kw.name for kw in self.keywords.all()]
def resources(self, resource_type=None):
"""
Returns a generator of objects that this group has permissions on.
:param resource_type: Filter's the queryset to objects with the same type.
"""
queryset = get_objects_for_group(
self.group, [
'base.view_resourcebase', 'base.change_resourcebase'], any_perm=True)
if resource_type:
queryset = [
item for item in queryset if hasattr(
item,
resource_type)]
for resource in queryset:
yield resource
def member_queryset(self):
return self.groupmember_set.all()
def get_managers(self):
"""
Returns a queryset of the group's managers.
"""
return get_user_model().objects.filter(
id__in=self.member_queryset().filter(
role='manager').values_list(
"user",
flat=True))
def user_is_member(self, user):
if not user.is_authenticated():
return False
return user.id in self.member_queryset().values_list("user", flat=True)
def user_is_role(self, user, role):
if not user.is_authenticated():
return False
return self.member_queryset().filter(user=user, role=role).exists()
def can_view(self, user):
if self.access == "private":
return user.is_authenticated() and self.user_is_member(user)
else:
return True
def can_invite(self, user):
if not user.is_authenticated():
return False
return self.user_is_role(user, "manager")
def join(self, user, **kwargs):
if user == user.get_anonymous():
raise ValueError("The invited user cannot be anonymous")
member, created = GroupMember.objects.get_or_create(group=self, user=user, defaults=kwargs)
if created:
user.groups.add(self.group)
else:
raise ValueError("The invited user \"{0}\" is already a member".format(user.username))
def invite(self, user, from_user, role="member", send=True):
params = dict(role=role, from_user=from_user)
if isinstance(user, get_user_model()):
params["user"] = user
params["email"] = user.email
else:
params["email"] = user
bits = [
settings.SECRET_KEY,
params["email"],
str(datetime.datetime.now()),
settings.SECRET_KEY
]
params["token"] = hashlib.sha1("".join(bits)).hexdigest()
# If an invitation already exists, re-use it.
try:
invitation = self.invitations.create(**params)
except IntegrityError:
invitation = self.invitations.get(
group=self,
email=params["email"])
if send:
invitation.send(from_user)
return invitation
@models.permalink
def get_absolute_url(self):
return ('group_detail', (), {'slug': self.slug})
@property
def class_name(self):
return self.__class__.__name__
class GroupMember(models.Model):
group = models.ForeignKey(GroupProfile)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
role = models.CharField(max_length=10, choices=[
("manager", _("Manager")),
("member", _("Member")),
])
joined = models.DateTimeField(default=datetime.datetime.now)
class GroupInvitation(models.Model):
group = models.ForeignKey(GroupProfile, related_name="invitations")
token = models.CharField(max_length=40)
email = models.EmailField()
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
related_name="pg_invitations_received")
from_user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="pg_invitations_sent")
role = models.CharField(max_length=10, choices=[
("manager", _("Manager")),
("member", _("Member")),
])
state = models.CharField(
max_length=10,
choices=(
("sent", _("Sent")),
("accepted", _("Accepted")),
("declined", _("Declined")),
),
default="sent",
)
created = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return "%s to %s" % (self.email, self.group.title)
class Meta:
unique_together = [("group", "email")]
# def send(self, from_user):
# current_site = Site.objects.get_current()
# domain = unicode(current_site.domain)
# ctx = {
# "invite": self,
# "group": self.group,
# "from_user": from_user,
# "domain": domain,
# }
# subject = render_to_string("groups/email/invite_user_subject.txt", ctx)
# message = render_to_string("groups/email/invite_user.txt", ctx)
# TODO Send a notification rather than a mail
# send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [self.email])
def accept(self, user):
if not user.is_authenticated() or user == user.get_anonymous():
raise ValueError("You must log in to accept invitations")
if not user.email == self.email:
raise ValueError(
"You can't accept an invitation that wasn't for
gooddata/openstack-nova | nova/objects/cell_mapping.py | Python | apache-2.0 | 10,122 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import versionutils
import six.moves.urllib.parse as urlparse
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql import false
from sqlalchemy.sql import true
import nova.conf
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
from nova.objects import base
from nova.objects import fields
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def _parse_netloc(netloc):
"""Parse a user:pass@host:port and return a dict suitable for formatting
a cell mapping template.
"""
these = {
'username': None,
'password': None,
'hostname': None,
'port': None,
}
if '@' in netloc:
userpass, hostport = netloc.split('@', 1)
else:
hostport = netloc
userpass = ''
if hostport.startswith('['):
host_end = hostport.find(']')
if host_end < 0:
raise ValueError('Invalid IPv6 URL')
these['hostname'] = hostport[1:host_end]
these['port'] = hostport[host_end + 1:]
elif ':' in hostport:
these['hostname'], these['port'] = hostport.split(':', 1)
else:
these['hostname'] = hostport
if ':' in userpass:
these['username'], these['password'] = userpass.split(':', 1)
else:
these['username'] = userpass
return these
@base.NovaObjectRegistry.register
class CellMapping(base.NovaTimestampObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added disabled field
VERSION = '1.1'
CELL0_UUID = '00000000-0000-0000-0000-000000000000'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
'name': fields.StringField(nullable=True),
'transport_url': fields.StringField(),
'database_connection': fields.StringField(),
'disabled': fields.BooleanField(default=False),
}
def obj_make_compatible(self, primitive, target_version):
super(CellMapping, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 1):
if 'disabled' in primitive:
del primitive['disabled']
@property
def identity(self):
if 'name' in self and self.name:
return '%s(%s)' % (self.uuid, self.name)
else:
return self.uuid
@staticmethod
def _format_url(url, default):
default_url = urlparse.urlparse(default)
subs = {
'username': default_url.username,
'password': default_url.password,
'hostname': default_url.hostname,
'port': default_url.port,
'scheme': default_url.scheme,
'query': default_url.query,
'fragment': default_url.fragment,
'path': default_url.path.lstrip('/'),
}
# NOTE(danms): oslo.messaging has an extended format for the URL
# which we need to support:
# scheme://user:pass@host:port[,user1:pass@host1:port, ...]/path
# Encode these values, if they exist, as indexed keys like
# username1, password1, hostname1, port1.
if ',' in default_url.netloc:
netlocs = default_url.netloc.split(',')
index = 0
for netloc in netlocs:
index += 1
these = _parse_netloc(netloc)
for key in these:
subs['%s%i' % (key, index)] = these[key]
return url.format(**subs)
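# Illustrative substitution performed by _format_url (values are made up,
# not taken from any real deployment):
#   url     = 'mysql+pymysql://{username}:{password}@{hostname}/nova_cell1'
#   default = 'mysql+pymysql://nova:secret@dbhost/nova_api'
#   result  = 'mysql+pymysql://nova:secret@dbhost/nova_cell1'
# A multi-host default would additionally fill username1, password1,
# hostname1, port1, etc., as described in the NOTE above.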
@staticmethod
def _format_db_url(url):
if CONF.database.connection is None:
if '{' in url:
LOG.error('Cell mapping database_connection is a template, '
'but [database]/connection is not set')
return url
try:
return CellMapping._format_url(url, CONF.database.connection)
except Exception:
LOG.exception('Failed to parse [database]/connection to '
'format cell mapping')
return url
@staticmethod
def _format_mq_url(url):
if CONF.transport_url is None:
if '{' in url:
LOG.error('Cell mapping transport_url is a template, but '
'[DEFAULT]/transport_url is not set')
return url
try:
return CellMapping._format_url(url, CONF.transport_url)
except Exception:
LOG.exception('Failed to parse [DEFAULT]/transport_url to '
'format cell mapping')
return url
@staticmethod
def _from_db_object(context, cell_mapping, db_cell_mapping):
for key in cell_mapping.fields:
val = db_cell_mapping[key]
if key == 'database_connection':
val = cell_mapping._format_db_url(val)
elif key == 'transport_url':
val = cell_mapping._format_mq_url(val)
setattr(cell_mapping, key, val)
cell_mapping.obj_reset_changes()
cell_mapping._context = context
return cell_mapping
@staticmethod
@db_api.api_context_manager.reader
def _get_by_uuid_from_db(context, uuid):
db_mapping = context.session.query(api_models.CellMapping).filter_by(
uuid=uuid).first()
if not db_mapping:
raise exception.CellMappingNotFound(uuid=uuid)
return db_mapping
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
db_mapping = cls._get_by_uuid_from_db(context, uuid)
return cls._from_db_object(context, cls(), db_mapping)
@staticmethod
@db_api.api_context_manager.writer
def _create_in_db(context, updates):
db_mapping = api_models.CellMapping()
db_mapping.update(updates)
db_mapping.save(context.session)
return db_mapping
@base.remotable
def create(self):
db_mapping = self._create_in_db(self._context, self.obj_get_changes())
self._from_db_object(self._context, self, db_mapping)
@staticmethod
@db_api.api_context_manager.writer
def _save_in_db(context, uuid, updates):
db_mapping = context.session.query(
api_models.CellMapping).filter_by(uuid=uuid).first()
if not db_mapping:
raise exception.CellMappingNotFound(uuid=uuid)
db_mapping.update(updates)
context.session.add(db_mapping)
return db_mapping
@base.remotable
def save(self):
changes = self.obj_get_changes()
db_mapping = self._save_in_db(self._context, self.uuid, changes)
self._from_db_object(self._context, self, db_mapping)
self.obj_reset_changes()
@staticmethod
@db_api.api_context_manager.writer
def _destroy_in_db(context, uuid):
result = context.session.query(api_models.CellMapping).filter_by(
uuid=uuid).delete()
if not result:
raise exception.CellMappingNotFound(uuid=uuid)
@base.remotable
def destroy(self):
self._destroy_in_db(self._context, self.uuid)
def is_cell0(self):
return self.obj_attr_is_set('uuid') and self.uuid == self.CELL0_UUID
@base.NovaObjectRegistry.register
class CellMappingList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Add get_by_disabled()
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('C
DLR-SC/tigl | thirdparty/nsiqcppstyle/nsiqunittest/nsiqcppstyle_unittestbase.py | Python | apache-2.0 | 2,439 | 0.00451 |
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import nsiqcppstyle_checker
import unittest
import nsiqcppstyle_rulemanager
import nsiqcppstyle_reporter
import nsiqcppstyle_state
errors = []
def AddError(err):
errors.append(err)
def CheckErrorContent(msg):
for err in errors :
if err[1] == msg :
return True
return False
def MockError(token, category, message):
AddError((token, category, message))
print token, category, message
class nct(unittest.TestCase):
def setUp(self):
nsiqcppstyle_rulemanager.ruleManager.ResetRules()
nsiqcppstyle_rulemanager.ruleManager.ResetRegisteredRules()
nsiqcppstyle_state._nsiqcppstyle_state.verbose = True
nsiqcppstyle_reporter.Error = MockError
self.setUpRule()
global errors
errors = []
def Analyze(self, filename, data):
nsiqcppstyle_checker.ProcessFile(nsiqcppstyle_rulemanager.ruleManager, filename, data)
DanielleWingler/UnderstandingDjango | TestSite/blog/__init__.py | Python | mit | 85 | 0.023529 |
# Intentionally left blank. There should be a .pyc file by the same name at creation.
b4ux1t3/adventofcode2015 | day1/generate.py | Python | mit | 366 | 0.005464 |
import random, sys
if len(sys.argv)!= 2:
print "Usage: python generate.py <how many instructions you want>"
sys.exit()
choices = ("(", ")")
output = ""
for x in range(int(sys.argv[1])):
output += random.choice(choices)
f = open("randout", "w")
f.write(output)
f.close()
print "Created an instruction set that is " + sys.argv[1] + " characters lon
|
g"
jameslivulpi/socketprogramming | udpServer.py | Python | gpl-3.0 | 872 | 0.001147 |
#!/usr/bin/python
# UDPPingerServer.py
# We will need the following module to generate randomized lost packets
import random
from socket import *
# Create a UDP socket
# Notice the use of SOCK_DGRAM for UDP packets
serverSocket = socket(AF_INET, SOCK_DGRAM)
# Assign IP address and port number to socket
serverSocket.bind(('', 12026))
print("The Server is ready to receive!")
while True:
# Generate random number in the range of 0 to 10
rand = random.randint(0, 10)
# Receive the client packet along with the address it is coming from
message, address = serverSocket.recvfrom(2048)
# Capitalize the message from the client
message = message.upper()
# If rand is less is than 4, we consider the packet lost and do not respond
if rand < 4:
continue
# Otherwise, the server responds
serverSocket.sendto(message, address)
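A minimal companion client for the ping server above (a sketch: the loopback address, the ten-ping loop, and the one-second timeout are assumptions, not part of udpServer.py):

#!/usr/bin/python
# UDPPingerClient sketch: send ten pings and treat a timeout as a lost packet.
from socket import socket, AF_INET, SOCK_DGRAM, timeout

clientSocket = socket(AF_INET, SOCK_DGRAM)
clientSocket.settimeout(1)
for seq in range(1, 11):
    clientSocket.sendto(("Ping %d" % seq).encode(), ("127.0.0.1", 12026))
    try:
        reply, _ = clientSocket.recvfrom(2048)
        print(reply.decode())
    except timeout:
        print("Request timed out")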
apache/libcloud | libcloud/test/compute/test_dimensiondata_v2_4.py | Python | apache-2.0 | 160,650 | 0.001332 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from types import GeneratorType
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import ET
from libcloud.common.types import InvalidCredsError
from libcloud.common.dimensiondata import (
DimensionDataAPIException,
NetworkDomainServicePlan,
)
from libcloud.common.dimensiondata import (
DimensionDataServerCpuSpecification,
DimensionDataServerDisk,
DimensionDataServerVMWareTools,
)
from libcloud.common.dimensiondata import DimensionDataTag, DimensionDataTagKey
from libcloud.common.dimensiondata import (
DimensionDataIpAddress,
DimensionDataIpAddressList,
DimensionDataChildIpAddressList,
DimensionDataPortList,
DimensionDataPort,
DimensionDataChildPortList,
)
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.compute.drivers.dimensiondata import (
DimensionDataNodeDriver as DimensionData,
)
from libcloud.compute.drivers.dimensiondata import DimensionDataNic
from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
from libcloud.utils.xml import fixxpath, findtext, findall
class DimensionData_v2_4_Tests(unittest.TestCase):
def setUp(self):
DimensionData.connectionCls.active_api_version = "2.4"
DimensionData.connectionCls.conn_class = DimensionDataMockHttp
DimensionDataMockHttp.type = None
self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
with self.assertRaises(ValueError):
DimensionData(*DIMENSIONDATA_PARAMS, region="blah")
def test_invalid_creds(self):
DimensionDataMockHttp.type = "UNAUTHORIZED"
with self.assertRaises(InvalidCredsError):
self.driver.list_nodes()
def test_get_account_details(self):
DimensionDataMockHttp.type = None
ret = self.driver.connection.get_account_details()
self.assertEqual(ret.full_name, "Test User")
self.assertEqual(ret.first_name, "Test")
self.assertEqual(ret.email, "test@example.com")
def test_list_locations_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
self.assertEqual(len(ret), 5)
first_loc = ret[0]
self.assertEqual(first_loc.id, "NA3")
self.assertEqual(first_loc.name, "US - West")
self.assertEqual(first_loc.country, "US")
def test_list_nodes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 7)
def test_node_extras(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertTrue(
isinstance(ret[0].extra["vmWareTools"], DimensionDataServerVMWareTools)
)
self.assertTrue(
isinstance(ret[0].extra["cpu"], DimensionDataServerCpuSpecification)
)
self.assertTrue(isinstance(ret[0].extra["disks"], list))
self.assertTrue(isinstance(ret[0].extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(ret[0].extra["disks"][0].size_gb, 10)
self.assertTrue(isinstance(ret[1].extra["disks"], list))
self.assertTrue(isinstance(ret[1].extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(ret[1].extra["disks"][0].size_gb, 10)
def test_server_states(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertTrue(ret[0].state == "running")
self.assertTrue(ret[1].state == "starting")
self.assertTrue(ret[2].state == "stopping")
self.assertTrue(ret[3].state == "reconfiguring")
self.assertTrue(ret[4].state == "running")
self.assertTrue(ret[5].state == "terminated")
self.assertTrue(ret[6].state == "stopped")
self.assertEqual(len(ret), 7)
def test_list_nodes_response_PAGINATED(self):
DimensionDataMockHttp.type = "PAGINATED"
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 9)
def test_paginated_mcp2_call_EMPTY(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "EMPTY"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server"
)
empty_node_list = []
for node_list in node_list_generator:
empty_node_list.extend(node_list)
self.assertTrue(len(empty_node_list) == 0)
def test_paginated_mcp2_call_PAGED_THEN_EMPTY(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "PAGED_THEN_EMPTY"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server"
)
final_node_list = []
for node_list in node_list_generator:
final_node_list.extend(node_list)
self.assertTrue(len(final_node_list) == 2)
def test_paginated_mcp2_call_with_page_size(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "PAGESIZE50"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server", page_size=50
)
self.assertTrue(isinstance(node_list_generator, GeneratorType))
# We're making sure here the filters make it to the URL
# See _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS for asserts
def test_list_nodes_response_strings_ALLFILTERS(self):
DimensionDataMockHttp.type = "ALLFILTERS"
ret = self.driver.list_nodes(
ex_location="fake_loc",
ex_name="fake_name",
ex_ipv6="fake_ipv6",
ex_ipv4="fake_ipv4",
ex_vlan="fake_vlan",
ex_image="fake_image",
ex_deployed=True,
ex_started=True,
ex_state="fake_state",
ex_network="fake_network",
ex_network_domain="fake_network_domain",
)
self.assertTrue(isinstance(ret, list))
self.assertEqual(len(ret), 7)
node = ret[3]
self.assertTrue(isinstance(node.extra["disks"], list))
self.assertTrue(isinstance(node.extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(node.size.id, "1")
self.assertEqual(node.image.id, "3ebf3c0f-90fe-4a8b-8585-6e65b316592c")
self.assertEqual(node.image.name, "WIN2008S/32")
disk = node.extra["disks"][0]
self.assertEqual(disk.id, "c2e1f199-116e-4dbc-9960-68720b832b0a")
self.assertEqual(disk.scsi_id, 0)
self.assertEqual(disk.size_gb, 50)
self.assertEqual(disk.speed, "STANDARD")
self.assertEqual(disk.state, "NORMAL")
def test_list_nodes_response_LOCATION(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
first_loc = ret[0]
ret = self.driver.list_nodes(ex_location=first_loc)
for node in ret:
self.assertEqual(node.extra["datacenterId"], "NA3")
def test_list_nodes_response_LOCATION_STR(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes(ex_location="NA3")
        for node in ret:
            self.assertEqual(node.extra["datacenterId"], "NA3")
|
xaque208/dotfiles
|
bin/init.py
|
Python
|
mit
| 239 | 0 |
#! /usr/bin/env python
import os
from dotfiles import Dotfiles
def main():
homedir = os.environ['HOME']
dotfilesRoot = homedir + '/dotfiles'
d = Dotfiles(dotfilesRoot)
d.setup()
if __name__ == "__main__":
main()
|
thalamus/Flexget
|
flexget/plugins/plugin_sftp.py
|
Python
|
mit
| 14,868 | 0.003228 |
from __future__ import unicode_literals, division, absolute_import
from urlparse import urljoin, urlparse
from collections import namedtuple
from itertools import groupby
import logging
import os
import posixpath
from functools import partial
import time
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
from flexget.utils.template import render_from_entry, RenderError
log = logging.getLogger('sftp')
ConnectionConfig = namedtuple('ConnectionConfig', ['host', 'port', 'username', 'password',
'private_key', 'private_key_pass'])
# retry configuration constants
CONNECT_TRIES = 3
RETRY_INTERVAL = 15
RETRY_STEP = 5
SOCKET_TIMEOUT = 15
# make separate path instances for local vs remote path styles
localpath = os.path
remotepath = posixpath #pysftp uses POSIX style paths
try:
import pysftp
logging.getLogger("paramiko").setLevel(logging.ERROR)
except ImportError:
pysftp = None
def sftp_connect(conf):
"""
Helper function to connect to an sftp server
"""
sftp = None
tries = CONNECT_TRIES
retry_interval = RETRY_INTERVAL
while not sftp:
try:
sftp = pysftp.Connection(host=conf.host, username=conf.username,
private_key=conf.private_key, password=conf.password,
port=conf.port, private_key_pass=conf.private_key_pass)
sftp.timeout = SOCKET_TIMEOUT
log.verbose('Connected to %s' % conf.host)
except Exception as e:
if not tries:
raise e
else:
log.debug('Caught exception: %s' % e)
log.warn('Failed to connect to %s; waiting %d seconds before retrying.' %
(conf.host, retry_interval))
time.sleep(retry_interval)
tries -= 1
retry_interval += RETRY_STEP
return sftp
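# Illustrative usage sketch (added for clarity, not part of the original plugin).
# The host and key path below are placeholder values only:
#
#     conf = ConnectionConfig(host='sftp.example.com', port=22, username='user',
#                             password=None, private_key='~/.ssh/id_rsa',
#                             private_key_pass=None)
#     sftp = sftp_connect(conf)   # retries with back-off, then raises on failure
#     sftp.close()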
def dependency_check():
"""
Check if pysftp module is present
"""
if not pysftp:
raise plugin.DependencyError(issued_by='sftp',
missing='pysftp',
message='sftp plugin requires the pysftp Python module.')
class SftpList(object):
"""
Generate entries from SFTP. This plugin requires the pysftp Python module and its dependencies.
Configuration:
host: Host to connect to
port: Port the remote SSH server is listening on. Defaults to port 22.
username: Username to log in as
password: The password to use. Optional if a private key is provided.
private_key: Path to the private key (if any) to log into the SSH server
private_key_pass: Password for the private key (if needed)
recursive: Indicates whether the listing should be recursive
        get_size: Indicates whether to calculate the size of the remote file/directory.
WARNING: This can be very slow when computing the size of directories!
        files_only: Indicates whether to omit directories from the results.
dirs: List of directories to download
Example:
sftp_list:
host: example.com
username: Username
private_key: /Users/username/.ssh/id_rsa
recursive: False
get_size: True
files_only: False
dirs:
- '/path/to/list/'
- '/another/path/'
"""
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': 22},
'files_only': {'type': 'boolean', 'default': True},
'recursive': {'type': 'boolean', 'default': False},
'get_size': {'type': 'boolean', 'default': True},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'dirs': one_or_more({'type': 'string'})
},
        'additionalProperties': False,
'required': ['host', 'username']
}
def prepare_config(self, config):
"""
Sets defaults for the provided configuration
"""
config.setdefault('port', 22)
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('dirs', ['.'])
return config
def on_task_input(self, task, config):
"""
Input task handler
"""
dependency_check()
config = self.prepare_config(config)
host = config['host']
port = config['port']
username = config['username']
password = config['password']
private_key = config['private_key']
private_key_pass = config['private_key_pass']
files_only = config['files_only']
recursive = config['recursive']
get_size = config['get_size']
dirs = config['dirs']
if not isinstance(dirs, list):
dirs = [dirs]
login_str = ''
port_str = ''
if username and password:
login_str = '%s:%s@' % (username, password)
elif username:
login_str = '%s@' % username
if port and port != 22:
port_str = ':%d' % port
url_prefix = 'sftp://%s%s%s/' % (login_str, host, port_str)
log.debug('Connecting to %s' % host)
conn_conf = ConnectionConfig(host, port, username, password, private_key, private_key_pass)
try:
sftp = sftp_connect(conn_conf)
except Exception as e:
raise plugin.PluginError('Failed to connect to %s (%s)' % (host, e))
entries = []
def file_size(path):
"""
Helper function to get the size of a node
"""
return sftp.lstat(path).st_size
def dir_size(path):
"""
Walk a directory to get its size
"""
sizes = []
def node_size(f):
sizes.append(file_size(f))
sftp.walktree(path, node_size, node_size, node_size, True)
size = sum(sizes)
return size
def handle_node(path, size_handler, is_dir):
"""
Generic helper function for handling a remote file system node
"""
if is_dir and files_only:
return
url = urljoin(url_prefix, sftp.normalize(path))
title = remotepath.basename(path)
entry = Entry(title, url)
if get_size:
try:
size = size_handler(path)
except Exception as e:
log.error('Failed to get size for %s (%s)' % (path, e))
size = -1
entry['content_size'] = size
if private_key:
entry['private_key'] = private_key
if private_key_pass:
entry['private_key_pass'] = private_key_pass
entries.append(entry)
# create helper functions to handle files and directories
handle_file = partial(handle_node, size_handler=file_size, is_dir=False)
        handle_dir = partial(handle_node, size_handler=dir_size, is_dir=True)
def handle_unknown(path):
"""
Skip unknown files
"""
log.warn('Skipping unknown file: %s' % path)
# the business end
for dir in dirs:
try:
sftp.walktree(dir, handle_file, handle_dir, handle_unknown, recursive)
except IOError as e:
log.error('Failed to open %s (%s)' % (dir, e))
continue
sftp.close()
return entries
class SftpDownload(object):
"""
    Download files from an SFTP server. This plugin requires the pysftp Python module and its
    dependencies.
|
pennersr/django-allauth
|
allauth/socialaccount/providers/patreon/views.py
|
Python
|
mit
| 2,047 | 0.001466 |
"""
Views for PatreonProvider
https://www.patreon.com/platform/documentation/oauth
"""
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import API_URL, USE_API_V2, PatreonProvider
class PatreonOAuth2Adapter(OAuth2Adapter):
provider_id = PatreonProvider.id
access_token_url = "https://www.patreon.com/api/oauth2/token"
authorize_url = "https://www.patreon.com/oauth2/authorize"
profile_url = "{0}/{1}".format(
API_URL,
"identity?include=memberships&fields%5Buser%5D=email,first_name,"
"full_name,image_url,last_name,social_connections,"
"thumb_url,url,vanity"
if USE_API_V2
else "current_user",
)
def complete_login(self, request, app, token, **kwargs):
        resp = requests.get(
self.profile_url,
headers={"Authorization": "Bearer " + token.token},
)
extra_data = resp.json().get("data")
if USE_API_V2:
# Extract tier/pledge level for Patreon API v2:
try:
member_id = extra_data["relationships"]["memberships"]["data"][0]["id"]
member_url = (
"{0}/members/{1}?include="
"currently_entitled_tiers&fields%5Btier%5D=title"
).format(API_URL, member_id)
resp_member = requests.get(
member_url,
headers={"Authorization": "Bearer " + token.token},
)
pledge_title = resp_member.json()["included"][0]["attributes"]["title"]
extra_data["pledge_level"] = pledge_title
except (KeyError, IndexError):
extra_data["pledge_level"] = None
pass
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(PatreonOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PatreonOAuth2Adapter)
|
ybdarrenwang/DuplicatedPhotoFinder
|
main.py
|
Python
|
bsd-3-clause
| 2,866 | 0.008374 |
import sys, Tkinter, tkFont, ttk
sys.path.insert(0, "./src/")
import button, database
from config import *
# Note: need to set size for bg_canvas here; otherwise it will grow, disregarding the size set when it was created!
def AuxscrollFunction(event):
bg_canvas.configure(scrollregion=bg_canvas.bbox("all"), height=THUMB_HEIGHT)
# create root
root = Tkinter.Tk()
root.geometry(str(WINDOW_WIDTH)+"x"+str(WINDOW_HEIGHT)+"+100+100")
root.minsize(width=WINDOW_WIDTH, height=WINDOW_HEIGHT)
root.title("Find Duplicated Photos")
Tkinter.Grid.columnconfigure(root, 0, weight=0)
Tkinter.Grid.columnconfigure(root, 1, weight=0)
Tkinter.Grid.columnconfigure(root, 2, weight=int(DISPLAY_WIDTH/INFO_WIDTH))
Tkinter.Grid.columnconfigure(root, 3, weight=0)
Tkinter.Grid.rowconfigure(root, 0, weight=int(DISPLAY_HEIGHT/THUMB_HEIGHT))
Tkinter.Grid.rowconfigure(root, 1, weight=0)
Tkinter.Grid.rowconfigure(root, 2, weight=0)
# create frame for displaying selected photo
display_photo_frame = Tkinter.Frame(root, height=DISPLAY_HEIGHT, width=DISPLAY_WIDTH)
display_photo_frame.grid(row=0, column=0, columnspan=3)
# create frame for displaying file info
display_photo_info_frame = Tkinter.Frame(root, height=DISPLAY_HEIGHT, width=INFO_WIDTH, background="white")
display_photo_info_frame.grid(row=0, column=3, sticky=Tkinter.E+Tkinter.W+Tkinter.N+Tkinter.S)
display_photo_info_frame.pack_propagate(False) # by default the frame will shrink to whatever is inside of it
# create background for scroll bar
bg_frame = Tkinter.Frame(root, height=THUMB_HEIGHT)
bg_frame.grid(row=1, column=0, columnspan=4, sticky=Tkinter.E+Tkinter.W+Tkinter.N+Tkinter.S)
bg_canvas = Tkinter.Canvas(bg_frame, background='white')
xscrollbar = Tkinter.Scrollbar(bg_frame, orient="horizontal", command=bg_canvas.xview)
xscrollbar.pack(side=Tkinter.BOTTOM, fill="x")
xscrollbar.grid_forget()
bg_canvas.configure(xscrollcommand=xscrollbar.set)
bg_canvas.pack(fill=Tkinter.BOTH, expand=True, pady=5)
# create frame for duplicated photo batch display
batch_photo_frame = Tkinter.Frame(bg_canvas, height=THUMB_HEIGHT, background='white')
bg_canvas.create_window((0,0),window=batch_photo_frame,anchor='nw')
batch_photo_frame.bind("<Configure>", AuxscrollFunction)
# Note: don't pack batch_photo_frame here, otherwise scroll bar won't show!!!
# create photo database and loading progress bar
progress_bar = ttk.Progressbar(root, orient=Tkinter.HORIZONTAL, length=PROGRESS_BAR_LENGTH, mode='determinate')
progress_bar.grid(row=2, column=2, columnspan=2, sticky=Tkinter.E+Tkinter.W, padx=10)
db = database.Database(progress_bar)
# create buttons
#button_cfg = button.ConfigButton(root, db, 2, 3)
button_next = button.NextBatchButton(root, batch_photo_frame, display_photo_frame, display_photo_info_frame, db, 2, 1)
button_open = button.OpenFolderButton(root, batch_photo_frame, db, button_next, 2, 0)
root.mainloop()
|
alexlo03/ansible
|
contrib/inventory/zabbix.py
|
Python
|
gpl-3.0
| 4,795 | 0.002086 |
#!/usr/bin/env python
# (c) 2013, Greg Buehler
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Zabbix Server external inventory script.
========================================
Returns hosts and hostgroups from Zabbix Server.
If you want to run with --limit against a host group with a space in the
name, use an asterisk. For example --limit="Linux*servers".
Configuration is read from `zabbix.ini`.
Tested with Zabbix Server 2.0.6 and 3.2.3.
"""
from __future__ import print_function
import os
import sys
import argparse
try:
import ConfigParser as configparser
except ImportError:
import configparser
try:
from zabbix_api import ZabbixAPI
except ImportError:
print("Error: Zabbix API library must be installed: pip install zabbix-api.",
file=sys.stderr)
sys.exit(1)
import json
class ZabbixInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
conf_path = './zabbix.ini'
if not os.path.exists(conf_path):
conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini'
if os.path.exists(conf_path):
config.read(conf_path)
# server
if config.has_option('zabbix', 'server'):
self.zabbix_server = config.get('zabbix', 'server')
# login
if config.has_option('zabbix', 'username'):
self.zabbix_username = config.get('zabbix', 'username')
if config.has_option('zabbix', 'password'):
self.zabbix_password = config.get('zabbix', 'password')
# ssl certs
if config.has_option('zabbix', 'validate_certs'):
if config.get('zabbix', 'validate_certs') in ['false', 'False', False]:
self.validate_certs = False
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def hoststub(self):
return {
'hosts': []
}
def get_host(self, api, name):
data = {'ansible_ssh_host': name}
return data
def get_list(self, api):
hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'})
data = {}
data[self.defaultgroup] = self.hoststub()
for host in hostsData:
hostname = host['name']
data[self.defaultgroup]['hosts'].append(hostname)
for group in host['groups']:
groupname = group['name']
if groupname not in data:
data[groupname] = self.hoststub()
data[groupname]['hosts'].append(hostname)
# Prevents Ansible from calling this script for each server with --host
data['_meta'] = {'hostvars': self.meta}
return data
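    # Illustrative shape of the returned inventory (group and host names here
    # are placeholders, not real Zabbix output):
    #   {"group_all": {"hosts": ["web01"]},
    #    "Linux servers": {"hosts": ["web01"]},
    #    "_meta": {"hostvars": {}}}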
def __init__(self):
self.defaultgroup = 'group_all'
self.zabbix_server = None
        self.zabbix_username = None
self.zabbix_password = None
self.validate_certs = True
self.meta = {}
self.read_settings()
self.read_cli()
if self.zabbix_server and self.zabbix_username:
try:
api = ZabbixAPI(server=self.zabbix_server, validate_certs=self.validate_certs)
api.login(user=self.zabbix_username, password=self.zabbix_password)
except BaseException as e:
print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr)
sys.exit(1)
if self.options.host:
data = self.get_host(api, self.options.host)
print(json.dumps(data, indent=2))
elif self.options.list:
data = self.get_list(api)
print(json.dumps(data, indent=2))
else:
print("usage: --list ..OR.. --host <hostname>", file=sys.stderr)
sys.exit(1)
else:
print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr)
sys.exit(1)
ZabbixInventory()
|
cpennington/edx-platform
|
openedx/core/djangoapps/credit/migrations/0006_creditrequirement_alter_ordering.py
|
Python
|
agpl-3.0
| 387 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('credit', '0005_creditrequirement_sort_value'),
]
operations = [
migrations.AlterModelOptions(
name='creditrequirement',
            options={'ordering': ['sort_value']},
),
]
|
zerok/django-zsutils
|
django_zsutils/templatetags/zsutils/taghelpers.py
|
Python
|
bsd-3-clause
| 1,704 | 0.005282 |
"""
:Requirements: django-tagging
This module contains some additional helper tags for the django-tagging
project. Note that the functionality here might already be present in
django-tagging but perhaps with some slightly different behaviour or
usage.
"""
from django import template
from django.core.urlresolvers import reverse as url_reverse
from tagging.utils import parse_tag_input
register = template.Library()
class TagsForObjectNode(template.Node):
def __init__(self, tags_string, urlname, junctor=None, last_junctor=None):
self.tags_string = template.Variable(tags_string)
self.junctor = junctor is None and ', ' or junctor.lstrip('"').rstrip('"')
self.last_junctor = last_junctor is None and ' and ' or last_junctor.lstrip('"').rstrip('"')
self.urlname = urlname
def render(self, context):
tags = parse_tag_input(self.tags_string.resolve(context))
tags = ['<a href="%s" rel="tag">%s</a>' % (url_reverse(self.urlname, kwargs={'tag':t}), t) for t in tags]
if len(tags) > 2:
first_part = self.junctor.join(tags[:-1])
return first_part + self.last_junctor + tags[-1]
if len(tags) == 2:
return self.last_junctor.join(tags)
        return self.junctor.join(tags)
@register.tag('object_tags')
def tags_for_object(parser, token):
"""
Simple tag for rendering tags of an object
Usage::
{% object_tags object.tags blog-tag ", " " and " %}
The last two arguments determine the junctor between the tag names with
the last being the last junctor being used.
"""
variables = token.split_contents()[1:]
return TagsForObjectNode(*variables)
|
leeclarke/homePi
|
src/python/zipInstall.py
|
Python
|
gpl-3.0
| 2,124 | 0.013183 |
"""
Manages downloading and updating applications through the use of a zip file hosted on the HomePi server.
"""
import datetime
import os.path
import zipfile
import urllib2
# Test call to verify things work.
def print_info(archive_name):
zf = zipfile.ZipFile(archive_name)
for info in zf.infolist():
print info.filename
print '\tComment:\t', info.comment
print '\tModified:\t', datetime.datetime(*info.date_time)
        print '\tSystem:\t\t', info.create_system, '(0 = Windows, 3 = Unix)'
print '\tZIP version:\t', info.create_version
print '\tCompressed:\t', info.compress_size, 'bytes'
print '\tUncompressed:\t', info.file_size, 'bytes'
print
# Extracts files from given archive into targetlocation. Preserves archive folder structure.
def extractFiles(archive_name,targetLocation):
zf = zipfile.ZipFile(archive_name)
zf.extractall(path=targetLocation)
# Download archive file for unpacking.
def retrieveZipfile(saveLocation, archiveURI, currentVersion = -1):
fileName = os.path.basename(archiveURI)
print 'downloading file: %s' % fileName
try:
response = urllib2.urlopen(archiveURI)
#Check to see if new version
serverVersion = 0
if response.info().getheader('file-version') is not None:
serverVersion = int(response.info().getheader('file-version'))
##version check, download if new
        if currentVersion < serverVersion:
fileDest = os.path.join(saveLocation,fileName)
with open(fileDest, "wb") as code:
code.write(response.read())
print 'Download done'
except urllib2.HTTPError as h:
print 'Error downloading file: %s' % h
#Test to see if everything runs smoothly, should add verification and clean up
if __name__ == '__main__':
#print_info('../test/resources/ansi161.zip')
extractFiles('../test/resources/ansi161.zip', '../test/resources/temp/')
retrieveZipfile( '../test/resources/temp/', 'http://the.earth.li/~sgtatham/putty/latest/x86/putty.zip')
|
sargas/scipy
|
scipy/optimize/tests/test_minpack.py
|
Python
|
bsd-3-clause
| 13,744 | 0.006694 |
"""
Unit tests for optimization routines from minpack.py.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, assert_almost_equal, assert_array_equal, \
assert_array_almost_equal, TestCase, run_module_suite, assert_raises, \
assert_allclose
import numpy as np
from numpy import array, float64, matrix
from scipy import optimize
from scipy.optimize.minpack import leastsq, curve_fit, fixed_point
class ReturnShape(object):
"""This class exists to create a callable that does not have a '__name__' attribute.
__init__ takes the argument 'shape', which should be a tuple of ints. When an instance
    is called with a single argument 'x', it returns numpy.ones(shape).
"""
def __init__(self, shape):
self.shape = shape
def __call__(self, x):
return np.ones(self.shape)
def dummy_func(x, shape):
"""A function that returns an array of ones of the given shape.
`x` is ignored.
"""
return np.ones(shape)
# Function and jacobian for tests of solvers for systems of nonlinear
# equations
def pressure_network(flow_rates, Qtot, k):
"""Evaluate non-linear equation system representing
the pressures and flows in a system of n parallel pipes::
f_i = P_i - P_0, for i = 1..n
f_0 = sum(Q_i) - Qtot
Where Q_i is the flow rate in pipe i and P_i the pressure in that pipe.
    Pressure is modeled as P = k*Q**2, where k is a valve coefficient and
Q is the flow rate.
Parameters
----------
flow_rates : float
A 1D array of n flow rates [kg/s].
k : float
A 1D array of n valve coefficients [1/kg m].
Qtot : float
A scalar, the total input flow rate [kg/s].
Returns
-------
F : float
A 1D array, F[i] == f_i.
"""
P = k * flow_rates**2
F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
return F
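# Illustrative sanity check of the fixture above (not part of the original test
# module): with four identical valves, k = 0.5 * np.ones(4) and Qtot = 4, equal
# flows of 1 kg/s give P_i = 0.5 in every pipe, so all pressure differences are
# zero and the flows sum to Qtot; pressure_network(np.ones(4), 4, 0.5 * np.ones(4))
# therefore returns the zero vector, which is what the fsolve/root tests below assert.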
def pressure_network_jacobian(flow_rates, Qtot, k):
"""Return the jacobian of the equation system F(flow_rates)
computed by `pressure_network` with respect to
*flow_rates*. See `pressure_network` for the detailed
    description of parameters.
Returns
-------
jac : float
*n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
and *f_i* and *Q_i* are described in the doc for `pressure_network`
"""
n = len(flow_rates)
pdiff = np.diag(flow_rates[1:] * 2 * k[1:] - 2 * flow_rates[0] * k[0])
jac = np.empty((n, n))
jac[:n-1, :n-1] = pdiff * 0
jac[:n-1, n-1] = 0
jac[n-1, :] = np.ones(n)
return jac
def pressure_network_fun_and_grad(flow_rates, Qtot, k):
return pressure_network(flow_rates, Qtot, k), \
pressure_network_jacobian(flow_rates, Qtot, k)
class TestFSolve(TestCase):
def test_pressure_network_no_gradient(self):
"""fsolve without gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows, info, ier, mesg = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
full_output=True)
assert_array_almost_equal(final_flows, np.ones(4))
assert_(ier == 1, mesg)
def test_pressure_network_with_gradient(self):
"""fsolve with gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
fprime=pressure_network_jacobian)
assert_array_almost_equal(final_flows, np.ones(4))
def test_wrong_shape_func_callable(self):
"""The callable 'func' has no '__name__' attribute."""
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))
def test_wrong_shape_fprime_callable(self):
"""The callables 'func' and 'deriv_func' have no '__name__' attribute."""
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_wrong_shape_fprime_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_float32(self):
func = lambda x: np.array([x[0] - 1000, x[1] - 10000], dtype=np.float32)**2
p = optimize.fsolve(func, np.array([1, 1], np.float32))
assert_allclose(func(p), [0, 0], atol=1e-3)
class TestRootHybr(TestCase):
def test_pressure_network_no_gradient(self):
"""root/hybr without gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='hybr', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient(self):
"""root/hybr with gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = matrix([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
args=(Qtot, k), method='hybr',
jac=pressure_network_jacobian).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient_combined(self):
"""root/hybr with gradient and function combined, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network_fun_and_grad,
initial_guess, args=(Qtot, k),
method='hybr', jac=True).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestRootLM(TestCase):
def test_pressure_network_no_gradient(self):
"""root/lm without gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='lm', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestLeastSq(TestCase):
def setUp(self):
x = np.linspace(0, 10, 40)
a,b,c = 3.1, 42, -304.2
self.x = x
self.abc = a,b,c
y_true = a*x**2 + b*x + c
np.random.seed(0)
self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)
def residuals(self, p, y, x):
a,b,c = p
err = y-(a*x**2 + b*x + c)
return err
def test_basic(self):
p0 = array([0,0,0])
params_fit, ier = leastsq(self.residuals, p0,
args=(self.y_meas, self.x))
assert_(ier in (1,2,3,4), 'solution not found (ier=%d)'%ier)
# low precision due to random
assert_array_almost_equal(params_fit, self.abc, decimal=2)
def test_full_output(self):
p0 = matrix([0,0,0])
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s'%mesg)
def test_input_untouched(self):
p0 = array([0,0,0],dtype=float64)
p0_copy = array(p0, copy=True)
full_output = leastsq(self.residuals, p0,
|
stratis-storage/stratis-cli
|
tests/whitebox/monkey_patching/test_keyboard_interrupt.py
|
Python
|
apache-2.0
| 1,934 | 0 |
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test management of KeyboardInterrupt in stratisd.
"""
# isort: LOCAL
import stratis_cli
from .._misc import SimTestCase
class KeyboardInterruptTestCase(SimTestCase):
"""
Test behavior of stratis on KeyboardInterrupt.
"""
def test_catch_keyboard_exception(self):
"""
Verify that the KeyboardInterrupt is propagated by the run() method.
./bin/stratis contains a try block at the outermost level which
        then catches the KeyboardInterrupt and exits with an error message.
The KeyboardInterrupt is most likely raised in the dbus-python
method which is actually communicating on the D-Bus, but it is
        fairly difficult to get at that method. Instead settle for getting
at the calling method generated by dbus-python-client-gen.
"""
def raise_keyboard_interrupt(_):
"""
Just raise the interrupt.
"""
raise KeyboardInterrupt()
# pylint: disable=import-outside-toplevel
# isort: LOCAL
from stratis_cli._actions import _data
# pylint: disable=protected-access
stratis_cli._actions._data.Manager.Properties.Version.Get = (
raise_keyboard_interrupt
)
with self.assertRaises(KeyboardInterrupt):
stratis_cli.run()(["daemon", "version"])
|
lorisercole/thermocepstrum
|
sportran/md/tools/resample.py
|
Python
|
gpl-3.0
| 1,531 | 0.001306 |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.signal import lfilter
def filter_and_sample(y_big, W, DT, window='rectangular', even_NSTEPS=True, detrend=False, drop_first=True):
"""Filter signal with moving average window of width W and then sample it
with time step DT."""
if (W > 1):
if (window == 'rectangular'):
            y_f = lfilter((1. / W) * np.ones(W), 1., y_big, axis=0)
else:
raise NotImplementedError('Not implemented window type.')
# drop first W steps (initial conditions)
if drop_first:
y = y_f[(W - 1)::DT]
else:
y = y_f[::DT]
else:
y = y_big[::DT]
# remove the mean
if detrend:
y = y - np.mean(y, axis=0)
# keeps an even number of points
if even_NSTEPS:
if (y.shape[0] % 2 == 1):
return y[:-1]
return y
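# Illustrative usage sketch (added for clarity; the signal below is synthetic):
#
#     t = np.linspace(0, 100, 10001)
#     y = np.sin(t)[:, np.newaxis]                  # shape (N, 1); filtered along axis 0
#     y_coarse = filter_and_sample(y, W=10, DT=10)  # moving average, then every 10th step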
def resample_psd(freqs, psd, cutfrequency):
if (cutfrequency >= freqs[-1]):
return freqs, psd
NFREQS = freqs.size - 1
cutidx = (np.abs(freqs - cutfrequency)).argmin()
if (NFREQS % cutidx == 0): # cut frequency is sub-multiple of max freq
DT = NFREQS / cutidx
if (DT > 2):
raise Warning('DT Not implemented.')
newpsd = psd.copy()[:cutidx + 1]
newpsd = newpsd + psd[:-cutidx - 2:-1]
newfreqs = freqs[:cutidx + 1]
#log.write_log(cutidx, DT, freqs[cutidx], newpsd.size)
else:
raise NotImplementedError('Not implemented.')
return newfreqs, newpsd
|
nickmcummins/misc-tools
|
piwigo/piwigo_symlink_local_album.py
|
Python
|
gpl-3.0
| 1,897 | 0.005799 |
import pathlib
import argparse
import os
IMGFORMAT = 'JPG'
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Symlink a local album directory to the "galleries" subdirectory in a local Piwigo instance.')
parser.add_argument('src_album', type=str, help='Location of album to symlink, relative to ALBUMS_ROOT')
parser.add_argument('piwigo_dir', type=str, help='Location of local Piwigo instance (e.g. /srv/http/piwigo)')
parser.add_argument('--sudo', '-su', action='store_true', help='Execute shell commands using sudo')
parser.add_argument('--range', type=str, default=None, help='Only create symlinks for photos in numeric range')
    args = parser.parse_args()
src_album, piwigo_dir, use_sudo = args.src_album, args.piwigo_dir, args.sudo
minrange = int(args.range.split('-')[0]) if args.range is not None else 0
maxrange = int(args.range.split('-')[1]) if args.range is not None else 1000000
albums_root = os.getenv('ALBUMS_ROOT', f'{str(pathlib.Path.home())}/Pictures/Albums')
def sh(command):
if use_sudo:
command = f'sudo {command}'
os.popen(command).read()
def symlink_img(imgfilename):
piwigo_album_dir = f'{piwigo_dir}/galleries/{src_album}'
if not os.path.exists(piwigo_album_dir):
sh(f'mkdir -p {piwigo_album_dir}')
sh(f'ln -s {albums_root}/{src_album}/{IMGFORMAT}/{imgfilename} {piwigo_album_dir}')
def is_expected_imgformat(imgfilename):
return imgfilename.split('.')[-1].lower() == IMGFORMAT.lower()
def is_in_range(imgfilename):
imgnum = int(imgfilename.split('.')[0].split('_')[-1])
return minrange <= imgnum <= maxrange
imgs = list(filter(lambda file: is_expected_imgformat(file) and is_in_range(file), os.listdir(f'{albums_root}/{src_album}/{IMGFORMAT}')))
for img in imgs:
symlink_img(img)
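    # Example invocation (illustrative; the album name, Piwigo path and range are
    # placeholders):
    #   python piwigo_symlink_local_album.py "2023/Holiday" /srv/http/piwigo --range 100-200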
|
sigdotcom/acm.mst.edu
|
ACM_General/tools/tests.py
|
Python
|
gpl-3.0
| 857 | 0 |
"""
Contains all unit tests for the Tools app.
"""
# Django
# from django.conf import settings
# from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.urls import reverse
# from django.utils import timezone
# local Django
# from accounts.models import User
# from events.forms import EventForm
# from sigs.models import SIG
# from events.models import Event
class HomeViewCase(TestCase):
"""
A class that tests whether tools functions work
"""
def test_view_responses(self):
"""
Makes requests to each page of the site and asserts a 200 response code
        (or success)
"""
response = self.client.get(reverse('tools:membership'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'tools/membership.html')
|
datagutten/comics
|
comics/comics/gws.py
|
Python
|
agpl-3.0
| 839 | 0 |
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Girls With Slingshots'
language = 'en'
    url = 'http://www.girlswithslingshots.com/'
start_date = '2004-09-30'
rights = 'Danielle Corsetto'
class Crawler(CrawlerBase):
history_capable_days = 30
schedule = 'Mo,Tu,We,Th,Fr'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
feed = self.parse_feed('http://www.girlswithslingshots.com/feed/')
for entry in feed.for_date(pub_date):
page = self.parse_page(entry.link)
url = page.src('img#comic')
title = entry.title.replace('Girls with Slingshots - ', '')
text = page.title('img#comic')
return CrawlerImage(url, title, text)
|
sometallgit/AutoUploader
|
Python27/Lib/test/test_ensurepip.py
|
Python
|
mit
| 9,313 | 0 |
import unittest
import os
import os.path
import contextlib
import sys
import test._mock_backport as mock
import test.test_support
import ensurepip
import ensurepip._uninstall
class TestEnsurePipVersion(unittest.TestCase):
def test_returns_version(self):
self.assertEqual(ensurepip._PIP_VERSION, ensurepip.version())
class EnsurepipMixin:
def setUp(self):
run_pip_patch = mock.patch("ensurepip._run_pip")
self.run_pip = run_pip_patch.start()
self.addCleanup(run_pip_patch.stop)
# Avoid side effects on the actual os module
real_devnull = os.devnull
os_patch = mock.patch("ensurepip.os")
patched_os = os_patch.start()
self.addCleanup(os_patch.stop)
patched_os.devnull = real_devnull
patched_os.path = os.path
self.os_environ = patched_os.environ = os.environ.copy()
class TestBootstrap(EnsurepipMixin, unittest.TestCase):
def test_basic_bootstrapping(self):
ensurepip.bootstrap()
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "setuptools", "pip",
],
mock.ANY,
)
additional_paths = self.run_pip.call_args[0][1]
self.assertEqual(len(additional_paths), 2)
def test_bootstrapping_with_root(self):
ensurepip.bootstrap(root="/foo/bar/")
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--root", "/foo/bar/",
"setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_user(self):
ensurepip.bootstrap(user=True)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--user", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_upgrade(self):
ensurepip.bootstrap(upgrade=True)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--upgrade", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_1(self):
ensurepip.bootstrap(verbosity=1)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-v", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_2(self):
ensurepip.bootstrap(verbosity=2)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-vv", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_3(self):
ensurepip.bootstrap(verbosity=3)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-vvv", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_regular_install(self):
ensurepip.bootstrap()
self.assertEqual(self.os_environ["ENSUREPIP_OPTIONS"], "install")
def test_bootstrapping_with_alt_install(self):
ensurepip.bootstrap(altinstall=True)
self.assertEqual(self.os_environ["ENSUREPIP_OPTIONS"], "altinstall")
def test_bootstrapping_with_default_pip(self):
ensurepip.bootstrap(default_pip=True)
self.assertNotIn("ENSUREPIP_OPTIONS", self.os_environ)
def test_altinstall_default_pip_conflict(self):
        with self.assertRaises(ValueError):
ensurepip.bootstrap(altinstall=True, default_pip=True)
self.assertFalse(self.run_pip.called)
def test_pip_environment_variables_removed(self):
# ensurepip deliberately ignores all pip environment variables
# See http://bugs.python.org/issue19734 for details
self.os_environ["PIP_THIS_SHOULD_GO_AWAY"] = "te
|
st fodder"
ensurepip.bootstrap()
self.assertNotIn("PIP_THIS_SHOULD_GO_AWAY", self.os_environ)
def test_pip_config_file_disabled(self):
# ensurepip deliberately ignores the pip config file
# See http://bugs.python.org/issue20053 for details
ensurepip.bootstrap()
self.assertEqual(self.os_environ["PIP_CONFIG_FILE"], os.devnull)
@contextlib.contextmanager
def fake_pip(version=ensurepip._PIP_VERSION):
if version is None:
pip = None
else:
class FakePip():
__version__ = version
pip = FakePip()
sentinel = object()
orig_pip = sys.modules.get("pip", sentinel)
sys.modules["pip"] = pip
try:
yield pip
finally:
if orig_pip is sentinel:
del sys.modules["pip"]
else:
sys.modules["pip"] = orig_pip
class TestUninstall(EnsurepipMixin, unittest.TestCase):
def test_uninstall_skipped_when_not_installed(self):
with fake_pip(None):
ensurepip._uninstall_helper()
self.assertFalse(self.run_pip.called)
def test_uninstall_skipped_with_warning_for_wrong_version(self):
with fake_pip("not a valid version"):
with test.test_support.captured_stderr() as stderr:
ensurepip._uninstall_helper()
warning = stderr.getvalue().strip()
self.assertIn("only uninstall a matching version", warning)
self.assertFalse(self.run_pip.called)
def test_uninstall(self):
with fake_pip():
ensurepip._uninstall_helper()
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_1(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=1)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-v", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_2(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=2)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-vv", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_3(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=3)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-vvv",
"pip", "setuptools",
]
)
def test_pip_environment_variables_removed(self):
# ensurepip deliberately ignores all pip environment variables
# See http://bugs.python.org/issue19734 for details
self.os_environ["PIP_THIS_SHOULD_GO_AWAY"] = "test fodder"
with fake_pip():
ensurepip._uninstall_helper()
self.assertNotIn("PIP_THIS_SHOULD_GO_AWAY", self.os_environ)
def test_pip_config_file_disabled(self):
# ensurepip deliberately ignores the pip config file
# See http://bugs.python.org/issue20053 for details
with fake_pip():
ensurepip._uninstall_helper()
self.assertEqual(self.os_environ["PIP_CONFIG_FILE"], os.devnull)
# Basic testing of the main functions and their argument parsing
EXPECTED_VERSION_OUTPUT = "pip " + ensurepip._PIP_VERSION
class TestBootstrappingMainFunction(EnsurepipMixin, unittest.TestCase):
def test_bootstrap_version(self):
with test.test_support.captured_stderr() as stderr:
with self.assertRaises(SystemExit):
ensurepip._main(["--version"])
result = stderr.getvalue().strip()
self.assertEqual(result, EXPECTED_VERSION_OUTPUT)
self.assertFalse(self.run_pip.called)
def test_basic_bootstrapping(self):
e
|
hoechenberger/psychopy
|
psychopy/tests/test_compatibility/test_compatibility.py
|
Python
|
gpl-3.0
| 1,242 | 0.000805 |
# -*- coding: utf-8 -*-
"""Tests for psychopy.compatibility"""
from builtins import object
import os
from psychopy import constants, compatibility
import pytest
pytestmark = pytest.mark.skipif(
    constants.PY3,
reason='Python3 cannot import the old-style pickle files')
thisPath = os.path.split(__file__)[0]
fixtures_path = os.path.join(thisPath, '..', 'data')
class _baseCompatibilityTest(object):
def test_FromFile(self):
dat = compatibility.fromFile(self.test_psydat)
class TestOldTrialHandler(_baseCompatibilityTest):
"""Test Old Trial Handler"""
def setup(self):
self.test_psydat = os.path.join(fixtures_path, 'oldstyle.psydat')
self.test_class = "<class 'psychopy.data.TrialHandler'>"
class TestNewTrialHandler(_baseCompatibilityTest):
"""Test New-styel Trial Handler"""
def setup(self):
self.test_psydat = os.path.join(fixtures_path, 'oldstyle.psydat')
self.test_class = "<class 'psychopy.data.TrialHandler'>"
class TestOldStairHandler(_baseCompatibilityTest):
"""Test Old Trial Handler"""
def setup(self):
self.test_psydat = os.path.join(fixtures_path, 'oldstyle_stair.psydat')
self.test_class = "<class 'psychopy.data.StairHandler'>"
|
andela-sjames/django-bucketlist-application
|
bucketlistapp/bucketlistapi/tests/test_bucketlistitems.py
|
Python
|
gpl-3.0
| 1,569 | 0.01211 |
''' Script used to test bucketlistitem response and request.'''
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from django.core.urlresolvers import reverse_lazy
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User
from .test_bucketlist import ApiHeaderAuthorization
class ApiUserBucketlistItems(ApiHeaderAuthorization):
def test_user_can_addbucketlist(self):
data={'name': 'item', 'done': True }
url= reverse_lazy('addbucketitem', kwargs={'id':19})
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class ApiUserItemListDetail(ApiHeaderAuthorization):
def test_user_can_updatebucketlist(self):
data={'name': 'updateitem', 'done': True }
url= reverse_lazy('itemdetail', kwargs={'id':19, 'item_id': 24 })
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_user_cannot_updatebucketlist(self):
data={'': '', '': '' }
url= reverse_lazy('itemdetail', kwargs={'id':19, 'item_id': 24 })
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_user_can_deletebucketlist(self):
url= reverse_lazy('itemdetail', kwargs={'id':19, 'item_id': 24 })
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
|
silveregg/moto
|
tests/test_dynamodb2/test_dynamodb_table_with_range_key.py
|
Python
|
apache-2.0
| 47,256 | 0.000825 |
from __future__ import unicode_literals
from decimal import Decimal
import boto
import boto3
from boto3.dynamodb.conditions import Key
import sure # noqa
from freezegun import freeze_time
from moto import mock_dynamodb2
from boto.exception import JSONResponseError
from tests.helpers import requires_boto_gte
try:
from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey, AllIndex
from boto.dynamodb2.table import Item, Table
from boto.dynamodb2.types import STRING, NUMBER
from boto.dynamodb2.exceptions import ValidationException
from boto.dynamodb2.exceptions import ConditionalCheckFailedException
except ImportError:
pass
def create_table():
table = Table.create('messages', schema=[
HashKey('forum_name'),
RangeKey('subject'),
], throughput={
'read': 10,
'write': 10,
})
return table
def create_table_with_local_indexes():
table = Table.create(
'messages',
schema=[
HashKey('forum_name'),
RangeKey('subject'),
],
throughput={
'read': 10,
'write': 10,
},
indexes=[
AllIndex(
                'threads_index',
parts=[
HashKey('forum_name', data_type=STRING),
RangeKey('threads', data_type=NUMBER),
]
)
]
)
return table
def iterate_results(res):
for i in res:
pass
@requires_boto_gte("2.9")
@mock_dynamodb2
@freeze_time("2012-01-14")
def test_create_table():
table = create_table()
expected = {
'Table': {
'AttributeDefinitions': [
{'AttributeName': 'forum_name', 'AttributeType': 'S'},
{'AttributeName': 'subject', 'AttributeType': 'S'}
],
'ProvisionedThroughput': {
'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10
},
'TableSizeBytes': 0,
'TableName': 'messages',
'TableStatus': 'ACTIVE',
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'forum_name'},
{'KeyType': 'RANGE', 'AttributeName': 'subject'}
],
'LocalSecondaryIndexes': [],
'ItemCount': 0, 'CreationDateTime': 1326499200.0,
'GlobalSecondaryIndexes': [],
}
}
table.describe().should.equal(expected)
@requires_boto_gte("2.9")
@mock_dynamodb2
@freeze_time("2012-01-14")
def test_create_table_with_local_index():
table = create_table_with_local_indexes()
expected = {
'Table': {
'AttributeDefinitions': [
{'AttributeName': 'forum_name', 'AttributeType': 'S'},
{'AttributeName': 'subject', 'AttributeType': 'S'},
{'AttributeName': 'threads', 'AttributeType': 'N'}
],
'ProvisionedThroughput': {
'NumberOfDecreasesToday': 0,
'WriteCapacityUnits': 10,
'ReadCapacityUnits': 10,
},
'TableSizeBytes': 0,
'TableName': 'messages',
'TableStatus': 'ACTIVE',
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'forum_name'},
{'KeyType': 'RANGE', 'AttributeName': 'subject'}
],
'LocalSecondaryIndexes': [
{
'IndexName': 'threads_index',
'KeySchema': [
{'AttributeName': 'forum_name', 'KeyType': 'HASH'},
{'AttributeName': 'threads', 'KeyType': 'RANGE'}
],
'Projection': {'ProjectionType': 'ALL'}
}
],
'ItemCount': 0,
'CreationDateTime': 1326499200.0,
'GlobalSecondaryIndexes': [],
}
}
table.describe().should.equal(expected)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_delete_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
table = create_table()
conn.list_tables()["TableNames"].should.have.length_of(1)
table.delete()
conn.list_tables()["TableNames"].should.have.length_of(0)
conn.delete_table.when.called_with('messages').should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_update_table_throughput():
table = create_table()
table.throughput["read"].should.equal(10)
table.throughput["write"].should.equal(10)
table.update(throughput={
'read': 5,
'write': 15,
})
table.throughput["read"].should.equal(5)
table.throughput["write"].should.equal(15)
table.update(throughput={
'read': 5,
'write': 6,
})
table.describe()
table.throughput["read"].should.equal(5)
table.throughput["write"].should.equal(6)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_add_and_describe_and_update():
table = create_table()
ok = table.put_item(data={
'forum_name': 'LOLCat Forum',
'subject': 'Check this out!',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
})
ok.should.equal(True)
table.get_item(forum_name="LOLCat Forum", subject='Check this out!').should_not.be.none
returned_item = table.get_item(
forum_name='LOLCat Forum',
subject='Check this out!'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'subject': 'Check this out!',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
})
returned_item['SentBy'] = 'User B'
returned_item.save(overwrite=True)
returned_item = table.get_item(
forum_name='LOLCat Forum',
subject='Check this out!'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'subject': 'Check this out!',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM',
})
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_partial_save():
table = create_table()
data = {
'forum_name': 'LOLCat Forum',
'subject': 'The LOLz',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
}
table.put_item(data=data)
returned_item = table.get_item(forum_name="LOLCat Forum", subject='The LOLz')
returned_item['SentBy'] = 'User B'
returned_item.partial_save()
returned_item = table.get_item(
forum_name='LOLCat Forum',
subject='The LOLz'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'subject': 'The LOLz',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
})
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_put_without_table():
table = Table('undeclared-table')
item_data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = Item(table, item_data)
item.save.when.called_with().should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_missing_item():
table = create_table()
table.get_item.when.called_with(
hash_key='tester',
range_key='other',
).should.throw(ValidationException)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_item_with_undeclared_table():
table = Table('undeclared-table')
table.get_item.when.called_with(test_hash=3241526475).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_item_without_range_key():
table = Table.create('messages', schema=[
HashKey('test_hash'),
RangeKey('test_range'),
], throughput={
'read': 10,
'write': 10,
})
hash_key = 3241526475
range_key = 1234567890987
table.put_item(data={'test_hash': hash_key, 'test_range': range_key})
table.get_item.when.called_with(t
|
flaing/gemrb
|
gemrb/GUIScripts/iwd2/Start.py
|
Python
|
gpl-2.0
| 6,550 | 0.027176 |
# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
import GemRB
import LoadScreen
from GUIDefines import *
StartWindow = 0
ProtocolWindow = 0
QuitWindow = 0
QuickLoadSlot = 0
def OnLoad():
global StartWindow, QuickLoadSlot
screen_width = GemRB.GetSystemVariable (SV_WIDTH)
screen_height = GemRB.GetSystemVariable (SV_HEIGHT)
if screen_width == 1024:
GemRB.LoadWindowFrame("STON10L", "STON10R", "STON10T", "STON10B")
GemRB.LoadWindowPack("GUICONN", 800, 600)
#main window
StartWindow = GemRB.LoadWindow(0)
StartWindow.SetFrame ()
ProtocolButton = StartWindow.GetControl(0x00)
NewGameButton = StartWindow.GetControl(0x02)
LoadGameButton = StartWindow.GetControl(0x07)
QuickLoadButton = StartWindow.GetControl(0x03)
JoinGameButton = StartWindow.GetControl(0x0B)
OptionsButton = StartWindow.GetControl(0x08)
QuitGameButton = StartWindow.GetControl(0x01)
StartWindow.CreateLabel(0x0fff0000, 0,0,800,30, "REALMS2", "", IE_FONT_SINGLE_LINE | IE_FONT_ALIGN_CENTER)
	VersionLabel = StartWindow.GetControl(0x0fff0000)
VersionLabel.SetText(GEMRB_VERSION)
	ProtocolButton.SetStatus(IE_GUI_BUTTON_ENABLED)
NewGameButton.SetStatus(IE_GUI_BUTTON_ENABLED)
LoadGameButton.SetStatus(IE_GUI_BUTTON_ENABLED)
GemRB.SetVar("SaveDir",1)
Games=GemRB.GetSaveGames()
#looking for the quicksave
EnableQuickLoad = IE_GUI_BUTTON_DISABLED
for Game in Games:
Slotname = Game.GetSaveID()
# quick save is 1
if Slotname == 1:
EnableQuickLoad = IE_GUI_BUTTON_ENABLED
QuickLoadSlot = Game
break
QuickLoadButton.SetStatus(EnableQuickLoad)
JoinGameButton.SetStatus(IE_GUI_BUTTON_DISABLED)
OptionsButton.SetStatus(IE_GUI_BUTTON_ENABLED)
QuitGameButton.SetStatus(IE_GUI_BUTTON_ENABLED)
LastProtocol = GemRB.GetVar("Last Protocol Used")
if LastProtocol == 0:
ProtocolButton.SetText(15413)
elif LastProtocol == 1:
ProtocolButton.SetText(13967)
elif LastProtocol == 2:
ProtocolButton.SetText(13968)
NewGameButton.SetText(13963)
LoadGameButton.SetText(13729)
QuickLoadButton.SetText(33508)
JoinGameButton.SetText(13964)
OptionsButton.SetText(13905)
QuitGameButton.SetText(13731)
QuitGameButton.SetFlags(IE_GUI_BUTTON_CANCEL, OP_OR)
NewGameButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NewGamePress)
QuitGameButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, QuitPress)
ProtocolButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, ProtocolPress)
OptionsButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, OptionsPress)
LoadGameButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, LoadPress)
QuickLoadButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, QuickLoadPress)
StartWindow.SetVisible(WINDOW_VISIBLE)
GemRB.LoadMusicPL("Theme.mus")
return
def ProtocolPress():
global StartWindow, ProtocolWindow
#StartWindow.Unload()
StartWindow.SetVisible(WINDOW_INVISIBLE)
ProtocolWindow = GemRB.LoadWindow(1)
#Disabling Unused Buttons in this Window
Button = ProtocolWindow.GetControl(2)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetFlags(IE_GUI_BUTTON_NO_IMAGE, OP_OR)
Button = ProtocolWindow.GetControl(3)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetFlags(IE_GUI_BUTTON_NO_IMAGE, OP_OR)
Button = ProtocolWindow.GetControl(9)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetFlags(IE_GUI_BUTTON_NO_IMAGE, OP_OR)
SinglePlayerButton = ProtocolWindow.GetControl(10)
SinglePlayerButton.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
SinglePlayerButton.SetText(15413)
IPXButton = ProtocolWindow.GetControl(0)
IPXButton.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
IPXButton.SetText(13967)
TCPIPButton = ProtocolWindow.GetControl(1)
TCPIPButton.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
TCPIPButton.SetText(13968)
SinglePlayerButton.SetVarAssoc("Last Protocol Used", 0)
IPXButton.SetVarAssoc("Last Protocol Used", 1)
TCPIPButton.SetVarAssoc("Last Protocol Used", 2)
TextArea = ProtocolWindow.GetControl(7)
TextArea.SetText(11316)
DoneButton = ProtocolWindow.GetControl(6)
DoneButton.SetText(11973)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, ProtocolDonePress)
DoneButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
ProtocolWindow.SetVisible(WINDOW_VISIBLE)
return
def ProtocolDonePress():
global StartWindow, ProtocolWindow
if ProtocolWindow:
ProtocolWindow.Unload()
ProtocolButton = StartWindow.GetControl(0x00)
LastProtocol = GemRB.GetVar("Last Protocol Used")
if LastProtocol == 0:
ProtocolButton.SetText(15413)
elif LastProtocol == 1:
ProtocolButton.SetText(13967)
elif LastProtocol == 2:
ProtocolButton.SetText(13968)
StartWindow.SetVisible(WINDOW_VISIBLE)
return
def LoadPress():
global StartWindow
if StartWindow:
StartWindow.Unload()
GemRB.SetNextScript("GUILOAD")
return
def QuickLoadPress():
global StartWindow, QuickLoadSlot
LoadScreen.StartLoadScreen()
GemRB.LoadGame(QuickLoadSlot) # load & start game
GemRB.EnterGame()
return
def OptionsPress():
global StartWindow
if StartWindow:
StartWindow.Unload()
GemRB.SetNextScript("Options")
return
def QuitPress():
global StartWindow, QuitWindow
StartWindow.SetVisible(WINDOW_INVISIBLE)
QuitWindow = GemRB.LoadWindow(22)
CancelButton = QuitWindow.GetControl(2)
CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, QuitCancelPress)
CancelButton.SetFlags(IE_GUI_BUTTON_CANCEL, OP_OR)
QuitButton = QuitWindow.GetControl(1)
QuitButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, QuitQuitPress)
QuitButton.SetFlags(IE_GUI_BUTTON_DEFAULT, OP_OR)
TextArea = QuitWindow.GetControl(0)
CancelButton.SetText(13727)
QuitButton.SetText(15417)
TextArea.SetText(19532)
QuitWindow.SetVisible(WINDOW_VISIBLE)
return
def NewGamePress():
global StartWindow
if StartWindow:
StartWindow.Unload()
GemRB.SetNextScript("SPParty")
return
def QuitCancelPress():
global StartWindow, QuitWindow
if QuitWindow:
QuitWindow.Unload()
StartWindow.SetVisible(WINDOW_VISIBLE)
return
def QuitQuitPress():
GemRB.Quit()
return
|
calamares/calamares
|
src/modules/bootloader/main.py
|
Python
|
gpl-3.0
| 28,413 | 0.001901 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <https://calamares.io> ===
#
# SPDX-FileCopyrightText: 2014 Aurélien Gâteau <agateau@kde.org>
# SPDX-FileCopyrightText: 2014 Anke Boersma <demm@kaosx.us>
# SPDX-FileCopyrightText: 2014 Daniel Hillenbrand <codeworkx@bbqlinux.org>
# SPDX-FileCopyrightText: 2014 Benjamin Vaudour <benjamin.vaudour@yahoo.fr>
# SPDX-FileCopyrightText: 2014-2019 Kevin Kofler <kevin.kofler@chello.at>
# SPDX-FileCopyrightText: 2015-2018 Philip Mueller <philm@manjaro.org>
# SPDX-FileCopyrightText: 2016-2017 Teo Mrnjavac <teo@kde.org>
# SPDX-FileCopyrightText: 2017 Alf Gaida <agaida@siduction.org>
# SPDX-FileCopyrightText: 2017-2019 Adriaan de Groot <groot@kde.org>
# SPDX-FileCopyrightText: 2017 Gabriel Craciunescu <crazy@frugalware.org>
# SPDX-FileCopyrightText: 2017 Ben Green <Bezzy1999@hotmail.com>
# SPDX-FileCopyrightText: 2021 Neal Gompa <ngompa13@gmail.com>
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Calamares is Free Software: see the License-Identifier above.
#
import os
import shutil
import subprocess
import libcalamares
from libcalamares.utils import check_target_env_call
import gettext
_ = gettext.translation("calamares-python",
localedir=libcalamares.utils.gettext_path(),
languages=libcalamares.utils.gettext_languages(),
fallback=True).gettext
# This is the sanitizer used all over to tidy up filenames
# to make identifiers (or to clean up names to make filenames).
file_name_sanitizer = str.maketrans(" /()", "_-__")
def pretty_name():
return _("Install bootloader.")
def get_uuid():
"""
Checks and passes 'uuid' to other routine.
:return:
"""
partitions = libcalamares.globalstorage.value("partitions")
for partition in partitions:
if partition["mountPoint"] == "/":
libcalamares.utils.debug("Root partition uuid: \"{!s}\"".format(partition["uuid"]))
return partition["uuid"]
return ""
def get_bootloader_entry_name():
"""
Passes 'bootloader_entry_name' to other routine based
on configuration file.
:return:
"""
if "bootloaderEntryName" in libcalamares.job.configuration:
return libcalamares.job.configuration["bootloaderEntryName"]
else:
branding = libcalamares.globalstorage.value("branding")
return branding["bootloaderEntryName"]
def get_kernel_line(kernel_type):
"""
Passes 'kernel_line' to other routine based on configuration file.
:param kernel_type:
:return:
"""
if kernel_type == "fallback":
if "fallbackKernelLine" in libcalamares.job.configuration:
return libcalamares.job.configuration["fallbackKernelLine"]
else:
return " (fallback)"
else:
if "kernelLine" in libcalamares.job.configuration:
return libcalamares.job.configuration["kernelLine"]
else:
return ""
def get_zfs_root():
"""
Looks in global storage to find the zfs root
:return: A string containing the path to the zfs root or None if it is not found
"""
zfs = libcalamares.globalstorage.value("zfsDatasets")
if not zfs:
libcalamares.utils.warning("Failed to locate zfs dataset list")
return None
# Find the root dataset
for dataset in zfs:
try:
if dataset["mountpoint"] == "/":
return dataset["zpool"] + "/" + dataset["dsName"]
except KeyError:
# This should be impossible
libcalamares.utils.warning("Internal error handling zfs dataset")
raise
return None
def is_btrfs_root(partition):
""" Returns True if the partition object refers to a btrfs root filesystem
:param partition: A partition map from global storage
:return: True if btrfs and root, False otherwise
"""
return partition["mountPoint"] == "/" and partition["fs"] == "btrfs"
def is_zfs_root(partition):
""" Returns True if the partition object refers to a zfs root filesystem
:param partition: A partition map from global storage
:return: True if zfs and root, False otherwise
"""
return partition["mountPoint"] == "/" and partition["fs"] == "zfs"
def create_systemd_boot_conf(install_path, efi_dir, uuid, entry, entry_name, kernel_type):
"""
Creates systemd-boot configuration files based on given parameters.
:param install_path:
:param efi_dir:
:param uuid:
:param entry:
:param entry_name:
:param kernel_type:
"""
kernel = libcalamares.job.configuration["kernel"]
kernel_params = ["quiet"]
partitions = libcalamares.globalstorage.value("partitions")
swap_uuid = ""
swap_outer_mappername = None
cryptdevice_params = []
# Take over swap settings:
# - unencrypted swap partition sets swap_uuid
# - encrypted root sets cryptdevice_params
for partition in partitions:
if partition["fs"] == "linuxswap" and not partition.get("claimed", None):
continue
has_luks = "luksMapperName" in partition
if partition["fs"] == "linuxswap" and not has_luks:
swap_uuid = partition["uuid"]
if (partition["fs"] == "linuxswap" and has_luks):
swap_outer_mappername = partition["luksMapperName"]
if partition["mountPoint"] == "/" and has_luks:
cryptdevice_params = ["cryptdevice=UUID="
+ partition["luksUuid"]
+ ":"
+ partition["luksMapperName"],
"root=/dev/mapper/"
+ partition["luksMapperName"]]
for partition in partitions:
        # systemd-boot with a BTRFS root filesystem needs to be told about the root subvolume.
# If a btrfs root subvolume wasn't set, it means the root is directly on the partition
# and this option isn't needed
if is_btrfs_root(partition):
btrfs_root_subvolume = libcalamares.globalstorage.value("btrfsRootSubvolume")
if btrfs_root_subvolume:
kernel_params.append("rootflags=subvol=" + btrfs_root_subvolume)
# zfs needs to be told the location of the root dataset
if is_zfs_root(partition):
zfs_root_path = get_zfs_root()
if zfs_root_path is not None:
kernel_params.append("zfs=" + zfs_root_path)
else:
# Something is really broken if we get to this point
libcalamares.utils.warning("Internal error handling zfs dataset")
raise Exception("Internal zfs data missing, please contact your distribution")
if cryptdevice_params:
kernel_params.extend(cryptdevice_params)
else:
kernel_params.append("root=UUID={!s}".format(uuid))
if swap_uuid:
kernel_params.append("resume=UUID={!s}".format(swap_uuid))
if swap_outer_mappername:
kernel_params.append("resume=/dev/mapper/{!s}".format(
swap_outer_mappername))
    kernel_line = get_kernel_line(kernel_type)
libcalamares.utils.debug("Configure: \"{!s}\"".format(kernel_line))
if kernel_type == "fallback":
img = libcalamares.job.configuration["fallback"]
entry_name = entry_name + "-fallback"
else:
        img = libcalamares.job.configuration["img"]
conf_path = os.path.join(install_path + efi_dir,
"loader",
"entries",
entry_name + ".conf")
# Copy kernel and initramfs to a subdirectory of /efi partition
files_dir = os.path.join(install_path + efi_dir, entry_name)
os.makedirs(files_dir, exist_ok=True)
kernel_path = install_path + kernel
kernel_name = os.path.basename(kernel_path)
shutil.copyfile(kernel_path, os.path.join(files_dir, kernel_name))
img_path = install_path + img
img_name = os.path.basename(img_path)
shutil.copyfile(img_path, os.path.join(files_dir, img_name))
lines = [
'## This
|
jordens/sensortag
|
influx_udp.py
|
Python
|
lgpl-3.0
| 2,082 | 0 |
import logging
import asyncio
logger = logging.getLogger(__name__)
class InfluxLineProtocol(asyncio.DatagramProtocol):
def __init__(self, loop):
self.loop = loop
self.transport = None
def connection_made(self, transport):
self.transport = transport
@staticmethod
def fmt(measurement, fields, *, tags={}, timestamp=None):
msg = measurement
msg = msg.replace(" ", "\\ ")
msg = msg.replace(",", "\\,")
for k, v in tags.items():
k = k.replace(" ", "\\ ")
k = k.replace(",", "\\,")
k = k.replace("=", "\\=")
v = v.replace(" ", "\\ ")
v = v.replace(",", "\\,")
v = v.replace("=", "\\=")
msg += ",{}={}".format(k, v)
msg += " "
for k, v in fields.items():
k = k.replace(" ", "\\ ")
k = k.replace(",", "\\,")
k = k.replace("=", "\\=")
msg += "{:s}=".format(k)
            if isinstance(v, bool):
                # check bool before int (bool is a subclass of int) and emit the
                # line-protocol boolean literals
                msg += "true" if v else "false"
            elif isinstance(v, int):
                msg += "{:d}i".format(v)
            elif isinstance(v, float):
                msg += "{:g}".format(v)
elif isinstance(v, str):
msg += '"{:s}"'.format(v.replace('"', '\\"'))
else:
raise TypeError(v)
msg += ","
if fields:
msg = msg[:-1]
if timestamp:
msg += " {:d}".for
|
mat(timestamp)
return msg
def write_one(self, *args, **kwargs):
msg = self.fmt(*args, **kwargs)
logger.debug(msg)
self.transport.sendto(msg.encode())
def write_many(self, lines):
msg = "\n".join(lines)
logger.debug(msg)
self.transport.sendto(msg.encode())
def datagram_received(self, data, addr):
logger.error("recvd %s %s", data, addr)
self.transport.close()
def error_received(self, exc):
logger.error("error %s", exc)
def connection_lost(self, exc):
logger.info("lost conn %s", exc)
|
ikoula/cloudstack
|
tools/marvin/marvin/lib/common.py
|
Python
|
gpl-2.0
| 67,873 | 0.002328 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common functions
"""
# Import Local Modules
from marvin.cloudstackAPI import (listConfigurations,
listPhysicalNetworks,
listRegions,
addNetworkServiceProvider,
updateNetworkServiceProvider,
listDomains,
listZones,
listPods,
listOsTypes,
listTemplates,
updateResourceLimit,
listRouters,
listNetworks,
listClusters,
listSystemVms,
listStoragePools,
listVirtualMachines,
listLoadBalancerRuleInstances,
listFirewallRules,
listVolumes,
listIsos,
listAccounts,
listSnapshotPolicies,
listDiskOfferings,
listVlanIpRanges,
listUsageRecords,
listNetworkServiceProviders,
listHosts,
listPublicIpAddresses,
listPortForwardingRules,
listLoadBalancerRules,
listSnapshots,
listUsers,
listEvents,
listServiceOfferings,
listVirtualRouterElements,
listNetworkOfferings,
listResourceLimits,
listVPCOfferings,
migrateSystemVm)
from marvin.sshClient import SshClient
from marvin.codes import (PASS, FAILED, ISOLATED_NETWORK, VPC_NETWORK,
BASIC_ZONE, FAIL, NAT_RULE, STATIC_NAT_RULE,
RESOURCE_PRIMARY_STORAGE, RESOURCE_SECONDARY_STORAGE,
RESOURCE_CPU, RESOURCE_MEMORY, PUBLIC_TRAFFIC,
GUEST_TRAFFIC, MANAGEMENT_TRAFFIC, STORAGE_TRAFFIC,
VMWAREDVS)
from marvin.lib.utils import (validateList,
xsplit,
get_process_status,
random_gen,
format_volume_to_ext3)
from marvin.lib.base import (PhysicalNetwork,
PublicIPAddress,
NetworkOffering,
NATRule,
StaticNATRule,
Volume,
Account,
Project,
Snapshot,
NetScaler,
VirtualMachine,
FireWallRule,
Template,
Network,
Host,
Resources,
Configurations,
Router,
PublicIpRange,
|
StorageNetworkIpRange,
TrafficType)
from marvin.lib.vcenter import Vcenter
from netaddr import IPAddress
import random
import re
import itertools
import random
import hashlib
# Import System modules
import time
def is_config_suitable(apiclient, name, value):
"""
Ensure if the deployment has the expected `value`
|
for the global setting `name'
@return: true if value is set, else false
"""
configs = Configurations.list(apiclient, name=name)
assert(
configs is not None and isinstance(
configs,
list) and len(
configs) > 0)
return configs[0].value == value
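# Illustrative use (the setting name and value are hypothetical):
#   if is_config_suitable(apiclient, "expunge.delay", "60"):
#       ...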
def wait_for_cleanup(apiclient, configs=None):
"""Sleeps till the cleanup configs passed"""
# Configs list consists of the list of global configs
if not isinstance(configs, list):
return
for config in configs:
cmd = listConfigurations.listConfigurationsCmd()
cmd.name = config
cmd.listall = True
try:
config_descs = apiclient.listConfigurations(cmd)
except Exception as e:
raise Exception("Failed to fetch configurations: %s" % e)
if not isinstance(config_descs, list):
raise Exception("List configs didn't returned a valid data")
config_desc = config_descs[0]
# Sleep for the config_desc.value time
time.sleep(int(config_desc.value))
return
def add_netscaler(apiclient, zoneid, NSservice):
""" Adds Netscaler device and enables NS provider"""
cmd = listPhysicalNetworks.listPhysicalNetworksCmd()
cmd.zoneid = zoneid
physical_networks = apiclient.listPhysicalNetworks(cmd)
if isinstance(physical_networks, list):
physical_network = physical_networks[0]
cmd = listNetworkServiceProviders.listNetworkServiceProvidersCmd()
cmd.name = 'Netscaler'
cmd.physicalnetworkid = physical_network.id
nw_service_providers = apiclient.listNetworkServiceProviders(cmd)
if isinstance(nw_service_providers, list):
netscaler_provider = nw_service_providers[0]
else:
cmd1 = addNetworkServiceProvider.addNetworkServiceProviderCmd()
cmd1.name = 'Netscaler'
cmd1.physicalnetworkid = physical_network.id
netscaler_provider = apiclient.addNetworkServiceProvider(cmd1)
netscaler = NetScaler.add(
apiclient,
NSservice,
physicalnetworkid=physical_network.id
)
if netscaler_provider.state != 'Enabled':
cmd = updateNetworkServiceProvider.updateNetworkServiceProviderCmd()
cmd.id = netscaler_provider.id
cmd.state = 'Enabled'
apiclient.updateNetworkServiceProvider(cmd)
return netscaler
def get_region(apiclient, region_id=None, region_name=None):
'''
@name : get_region
@Desc : Returns the Region Information for a given region id or region name
@Input : region_name: Name of the Region
region_id : Id of the region
@Output : 1. Region Information for the passed inputs else first Region
2. FAILED In case the cmd failed
'''
cmd = listRegions.listRegionsCmd()
if region_name is not None:
cmd.name = region_name
if region_id is not None:
cmd.id = region_id
cmd_out = apiclient.listRegions(cmd)
return FAILED if validateList(cmd_out)[0] != PASS else cmd_out[0]
def get_domain(apiclient, domain_id=None, domain_name=None):
'''
@name : get_domain
@Desc : Returns the Domain Information for a given domain id or domain name
@Input : domain id : Id of the Domain
domain_name : Name of the Domain
@Output : 1. Domain Information for the passed inputs else first Domain
2. FAILED In case the cmd fai
|
eladnoor/small-molecule-regulation
|
python/topology.py
|
Python
|
mit
| 3,400 | 0.000882 |
# Pre-compute the shortest path length in the stoichiometric matrix
# NB: check some of the shortest path calcs?
import pdb
import settings
import networkx as nx
import pandas as pd
import numpy as np
import os
METS_TO_REMOVE = ['h', 'h2o', 'co2', 'o2', 'pi', 'atp', 'adp', 'amp',
'nad', 'nadh', 'nadp', 'nadph', 'coa', 'thf', '5mthf',
'5fthf', 'methf', 'mlthf', 'nh4', 'cmp', 'q8', 'q8h2',
'udp', 'udpg', 'fad', 'fadh2', 'ade', 'ctp', 'gtp', 'h2o2',
'mql8', 'mqn8', 'na1', 'ppi', 'acp']
def convert_to_bipartite(S):
"""
convert a standard stoichiometric matrix (in a Pandas DataFrame)
to a bipartite graph with an edge between every reactant and all its
reactions
"""
# convert the stoichiometric matrix to a sparse representation
S_sparse = pd.melt(S.reset_index(),
id_vars='bigg.metabolite', value_name='coeff')
S_sparse = S_sparse[S_sparse.coeff != 0]
# remove the high-degree metabolites that we want to ignore for graph
# distance
met_comp = S_sparse['bigg.metabolite'].str.rsplit('_', 1, expand=True)
S_sparse = S_sparse[(~met_comp[0].isin(METS_TO_REMOVE)) & (met_comp[1] == 'c')]
S_sparse['bigg.metabolite'] = met_comp[0].str.upper()
mets = set(S_sparse['bigg.metabolite'].unique())
rxns = set(S_sparse['bigg.reaction'].unique())
B = nx.Graph()
B.add_nodes_from(mets, bipartite=0)
B.add_nodes_from(rxns, bipartite=1)
B.add_weighted_edges_from(S_sparse.as_matrix())
return B, mets, rxns
def calculate_distances(smrn):
smrn['bigg.metabolite'] = smrn['bigg.metabolite'].str.upper()
# %% Read BIGG model
model, S = settings.get_ecoli_json()
B, mets, rxns = convert_to_bipartite(S)
spl = dict(nx.shortest_path_length(B))
spl_values = []
for met in mets:
r = rxns.intersection(spl[met].keys())
spl_values += list(map(spl[met].get, r))
all_distances = (np.array(spl_values) - 1.0) / 2
smrn_dist = smrn[['bigg.metabolite', 'bigg.reaction']].drop_duplicates()
    smrn_dist['distance'] = pd.np.nan
    for i, row in smrn_dist.iterrows():
source = row['bigg.metabolite'] # remember we dropped it before
target = row['bigg.reaction']
if source.lower() in METS_TO_REMOVE:
continue
if target in spl[source]:
smrn_dist.at[i, 'distance'] = (spl[source][target] - 1.0) / 2.0
# %% Save data
smrn_dist = smrn_dist.dropna()
return smrn_dist, all_distances
if __name__ == '__main__':
# print out a list of all 0-distance interaction, i.e. substrate
# or product inhibition
smrn = pd.read_csv(os.path.join(settings.CACHE_DIR,
'iJO1366_SMRN.csv'), index_col=None)
smrn_dist, all_distances = calculate_distances(smrn)
smrn_merged = pd.merge(smrn, smrn_dist, on=['bigg.metabolite',
'bigg.reaction'])
dist_mode_df = smrn_merged.groupby(('bigg.metabolite',
'bigg.reaction', 'Mode')).first()
dist_mode_df = dist_mode_df[['distance']].reset_index()
react_inhibition = dist_mode_df[(dist_mode_df['Mode'] == '-') & (dist_mode_df['distance'] == 0)]
react_inhibition.to_excel(os.path.join(settings.RESULT_DIR, 'reactant_inhibition.xls'))
|
ikn/farragone
|
farragone/__init__.py
|
Python
|
gpl-3.0
| 418 | 0.002392 |
"""Farragone.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version."""
import gettext
from . import coreconf as _conf
gettext.install(_conf.IDENTIFIER, _conf.PATH_LOCALE, names=('ngettext',))
from . import util, conf, core, ui
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_network_interface_load_balancers_operations.py
|
Python
|
mit
| 5,808 | 0.004304 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceLoadBalancersOperations(object):
"""NetworkInterfaceLoadBalancersOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
|
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkInterfaceLoadBalancerListResult"]
"""List all load balancers in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceLoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.NetworkInterfaceLoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceLoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'} # type: ignore
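# Illustrative call (resource names are placeholders); the attribute name assumes
# the usual NetworkManagementClient wiring for this API version:
#   for lb in client.network_interface_load_balancers.list("my-rg", "my-nic"):
#       print(lb.name)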
|
Moshiasri/learning
|
Python_dataCamp/Map()LambdaFunction.py
|
Python
|
gpl-3.0
| 474 | 0 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 30 20:12:17 2017
@author: Mohtashim
"""
# Create a list of strings: spells
spells = ["protego", "accio", "expecto patronum", "legilimens"]
# Use map() to apply a lambda function over spells: shout_spells
shout_spells = map(lambda item: item + '!!!', spells)
# Convert shout_spells to a list: shout_spells_list
shout_spells_list = list(shout_spells)
# Convert shout_spells into a list and print it
print(shout_spells_list)
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/deed/corellia/player_house_deed/shared_corellia_house_large_deed.py
|
Python
|
mit
| 490 | 0.044898 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/corellia/player_house_deed
|
/shared_corellia_house_large_deed.iff"
result.attribute_template_id = 2
result.stfName("deed","corellia_house_large_deed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
111t8e/h2o-2
|
py/test_config_basic.py
|
Python
|
apache-2.0
| 597 | 0.005025 |
import h2o, h2o_config
l = h2o_config.setup_test_config(test_config_json='test_config.json')
print "\nsetup_test_config returns list o
|
f test config objs:", l
# Here are some ways to reference the config state that the json created
print "\nHow to reference.."
for i, obj in enumerate(h2o_config.configs):
print "keys in config", i, ":", obj.__dict__.keys()
print h2o_config.configs[0].trees
for t in h2o_config.configs:
print "\nTest config_name:", t.config_name
print "trees:", t.trees
print "params:", t.params
print "params['timeoutSecs']:", t.params['timeoutSecs']
|
allure-framework/allure-python
|
allure-robotframework/examples/status/status_library.py
|
Python
|
apache-2.0
| 129 | 0 |
from robot.libraries.BuiltIn import BuiltIn
def fail_with_traceback(traceback_message):
    BuiltIn().fail(traceback_message)
|
ManInAGarden/PiADCMeasure
|
tkwindow.py
|
Python
|
lgpl-3.0
| 4,958 | 0.010286 |
# -*- coding: utf-8 *-*
# made for python3!
from tkinter import *
from tkinter.ttk import *
class TkWindow():
registers = {}
def __init__(self, parent, title, width=400, height=300):
self.parent = parent #Tk or toplevel
self.w = width
self.h = height
self.make_gui(title)
self.loaded()
def loaded(self):
pass # overload me
"""register another window to receive a signal"""
@classmethod
def register(cls, target, signame):
if not target in cls.registers:
cls.registers[target] = []
cls.registers[target].append(signame)
"""send a signal to all registered windows"""
def send(self, signame, data=None):
cls = self.__class__
for targ, sigs in cls.registers.items():
if sigs != None:
if signame in sigs:
targ.receive(self, signame, data)
"""receive a signame"""
def receive(self, sender, signame, data):
print("receive not overloaded but signal registered for <"
+ signame + "> from <"
+ str(sender) + "> with <" + str(data) +">")
# overload me in your receiving window for your application
def make_gui(self, title):
self.parent.title(title)
Style().configure("TFrame", padding=5)
self.frame = Frame(self.parent,
width=self.w,
height=self.h)
def makelabel(self, parent, lcol=0, lrow=0, caption='', **options):
entry = Label(parent, text=caption, **options).grid(row=lrow, column=lcol, sticky=NE)
return entry
"""create a multiline text entry field with a label"""
def maketext(self, parent, lcol=0, lrow=0, erow=0, ecol=1, caption='', width=None, **options):
print(lrow, lcol)
if caption != '':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=NE)
entry = Text(parent, **options)
if width:
entry.config(width=width)
entry.grid(row=erow, column=ecol, sticky=W)
return entry
def makeentry(self, parent, lcol=0, lrow=0, erow=0, ecol=1, caption='', width=None, **options):
if caption!='':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=E)
entry = Entry(parent, **options)
if width:
entry.config(width=width)
entry.grid(row=erow, column=ecol, sticky=W)
return entry
def setentryvalue(self, entry, value):
entry.delete(0,END)
|
entry.insert(0, value)
def settextvalue(self, entry, value):
entry.delete(0.0,END);
entry.insert(0.0, value);
def setbuttontext(self, button, txt):
button['text'] = txt
    def makecombo(self, parent, ccol=1, crow=0, lcol=0, lrow=0, caption='',
width=None, **options):
if caption!='':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=E)
cbox = Combobox(parent, **options)
if width:
cbox.config(width=width)
cbox.grid(row=crow, column=ccol)
return cbox
def makecheck(self, parent, ecol=0, erow=0, caption='', **options):
cb = Checkbutton(parent, text=caption, **options)
cb.grid(row=erow, column=ecol, sticky=W)
return cb
def makebutton(self, parent, bcol=0, brow=0, caption='Press me', sticky=W, **options):
bu = Button(parent, text=caption, **options)
bu.grid(row=brow, column=bcol, sticky=sticky)
return bu
"""create a list at the givne position"""
def makelist(self, parent, llcol=0, llrow=1, lcol=0, lrow=0,
caption='List', elements=[], mode='v',
lrowspan=1, lcolspan=1,
**options):
frame = Frame(parent)
frame.grid(row=lrow, column=lcol, rowspan=lrowspan, columnspan=lcolspan)
hscroll = vscroll = None
if caption!='':
Label(parent, text=caption).grid(row=llrow, column=llcol, sticky=W)
lb = Listbox(frame, **options)
if 'v' in mode:
vscroll = Scrollbar(frame, orient=VERTICAL)
lb.config(yscrollcommand = vscroll.set)
vscroll.config(command=lb.yview)
vscroll.pack(side=RIGHT, fill=Y)
if 'h' in mode:
            hscroll = Scrollbar(frame, orient=HORIZONTAL)
lb.configure(xscrollcommand = hscroll.set)
hscroll.config(command = lb.xview)
hscroll.pack(side=BOTTOM, fill=X)
lb.pack(side=LEFT, fill=BOTH, expand=1)
if len(elements)>0:
            self.setlistelements(lb, elements)
return lb
def setlistelements(self, lb, elements):
lb.delete(0, END)
for element in elements:
lb.insert(END, element)
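# Sketch of the register/send/receive signalling used above (class, instance and
# signal names are invented for illustration):
#
#   class MeterWindow(TkWindow):
#       def receive(self, sender, signame, data):
#           print("got", signame, data)
#
#   TkWindow.register(meter_window, "new-sample")   # meter_window: a MeterWindow instance
#   control_window.send("new-sample", data=3.3)     # any TkWindow subclass instance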
|
SINGROUP/pycp2k
|
pycp2k/classes/_davidson2.py
|
Python
|
lgpl-3.0
| 666 | 0.003003 |
from pycp2k.inputsection import InputSection
from ._each112 import _each112
class _davidson2(InputSection):
def __init__(self):
        InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each112()
self._name = "DAVIDSON"
        self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
robocomp/robocomp-robolab
|
experimental/dumbGlobalTrajectory/src/trajectoryrobot2dI.py
|
Python
|
gpl-3.0
| 1,815 | 0.019835 |
#
# Copyright (C) 2016 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, Ice
ROBOCOMP = ''
try:
ROBOCOMP = os.environ['ROBOCOMP']
except:
	print '$ROBOCOMP environment variable not set, using the default value /opt/robocomp'
ROBOCOMP = '/opt/robocomp'
if len(ROBOCOMP)<1:
print 'ROBOCOMP environment variable not set! Exiting.'
sys.exit()
preStr = "-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/"
Ice.loadSlice(preStr+"TrajectoryRobot2D.ice")
from RoboCompTrajectoryRobot2D import *
class TrajectoryRobot2DI(TrajectoryRobot2D):
def __init__(self, worker):
self.worker = worker
def getState(self, c):
return self.worker.getState()
def goBackwards(self, target, c):
return self.worker.goBackwards(target)
def stop(self, c):
return self.worker.stop()
def goReferenced(self, target, xRef, zRef, threshold, c):
return self.worker.goReferenced(target, xRef, zRef, threshold)
def changeTarget(self, target, c):
return self.worker.changeTarget(target)
def go(self, target, c):
return self.worker.go(target)
def mapBasedTarget(self, parameters, c):
return self.worker.mapBasedTarget(parameters)
|
luotao1/Paddle
|
python/paddle/fluid/dygraph/profiler.py
|
Python
|
apache-2.0
| 886 | 0 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from .. import core
__all__ = [
'start_gperf_profiler',
'stop_gperf_profiler',
]
def start_gperf_profiler():
core.start_imperative_gperf_profiler()
def stop_gperf_profiler():
core.stop_imperative_gperf_profiler()
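# Typical usage sketch (call sites are illustrative): bracket the dygraph code to
# be profiled with the two helpers above.
#   start_gperf_profiler()
#   ...  # run dygraph code
#   stop_gperf_profiler()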
|
b-com/watcher-metering
|
watcher_metering/agent/agent.py
|
Python
|
apache-2.0
| 5,245 | 0 |
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In charge of collecting data from drivers and push it to the publisher."""
import os
import msgpack
import nanomsg
from oslo_log import log
from watcher_metering.agent.manager import MetricManager
LOG = log.getLogger(__name__)
class Agent(MetricManager):
def __init__(self, conf, driver_names, use_nanoconfig_service,
publisher_endpoint, nanoconfig_service_endpoint,
nanoconfig_update_endpoint, nanoconfig_profile):
"""
:param conf: Configuration obtained from a configuration file
:type conf: oslo_config.cfg.ConfigOpts instance
:param driver_names: The list of driver names to register
:type driver_names: list of str
:param use_nanoconfig_service: Indicates whether or not it should use a
nanoconfig service
:type use_nanoconfig_service: bool
:param publisher_endpoint: Publisher server URI
:type publisher_endpoint: str
:param nanoconfig_service_endpoint: Nanoconfig service URI
:type nanoconfig_service_endpoint: str
:param nanoconfig_update_endpoint: Nanoconfig update service URI
:type nanoconfig_update_endpoint: str
:param nanoconfig_profile: Nanoconfig profile URI
:type nanoconfig_profile: str
"""
super(Agent, self).__init__(conf, driver_names)
self.socket = nanomsg.Socket(nanomsg.PUSH)
self.use_nanoconfig_service = use_nanoconfig_service
self.publisher_endpoint = publisher_endpoint
self.nanoconfig_service_endpoint = nanoconfig_service_endpoint
self.nanoconfig_update_endpoint = nanoconfig_update_endpoint
self.nanoconfig_profile = nanoconfig_profile
@property
def namespace(self):
return "watcher_metering.drivers"
def start(self):
LOG.info("[Agent] Starting main thread...")
super(Agent, self).start()
def setup_socket(self):
if self.use_nanoconfig_service:
self.set_nanoconfig_endpoints()
self.socket.configure(self.nanoconfig_profile)
LOG.info("[Agent] Agent nanomsg's profile `%s`",
self.nanoconfig_profile)
else:
LOG.debug("[Agent] Agent connected to: `%s`",
self.publisher_endpoint)
self.socket.connect(self.publisher_endpoint)
LOG.info("[Agent] Ready for pushing to Publisher node")
def set_nanoconfig_endpoints(self):
"""This methods sets both the `NN_CONFIG_SERVICE` and
`NN_CONFIG_UPDATES` environment variable as nanoconfig uses it to
access the nanoconfig service
"""
# NN_CONFIG_SERVICE:
nn_config_service = os.environ.get("NN_CONFIG_SERVICE")
if not self.nanoconfig_service_endpoint and not nn_config_service:
raise ValueError(
"Invalid configuration! No NN_CONFIG_SERVICE set. You need to "
"configure your `nanoconfig_service_endpoint`.")
if self.nanoconfig_service_endpoint:
os.environ["NN_CONFIG_SERVICE"] = self.nanoconfig_service_endpoint
else:
self.nanoconfig_service_endpoint = nn_config_service
# NN_CONFIG_UPDATES
nn_config_updates = os.environ.get("NN_CONFIG_UPDATES")
if not self.nanoconfig_update_endpoint and not nn_config_updates:
raise ValueError(
"Invalid configuration! No NN_CONFIG_UPDATES set. You need to "
"configure your `nanoconfig_update_endpoint`.")
if self.nanoconfig_update_endpoint:
os.environ["NN_CONFIG_UPDATES"] = self.nanoconfig_u
|
pdate_endpoint
else:
self.nanoconfig_update_endpoint = nn_config_updates
def run(self):
self.setup_socket()
super(Agent, self).run()
def stop(self):
self.socket.close()
super(Agent, self).stop()
LOG.debug("[Agent] Stopped")
def update(self, notifier, data):
LOG.debug("[Agent] Upda
|
ted by: %s", notifier)
LOG.debug("[Agent] Preparing to send message %s", msgpack.loads(data))
try:
LOG.debug("[Agent] Sending message...")
# The agent will wait for the publisher server to be listening on
# the related publisher_endpoint before continuing
# In which case, you should start the publisher to make it work!
self.socket.send(data)
LOG.debug("[Agent] Message sent successfully!")
except nanomsg.NanoMsgError as exc:
LOG.error("Exception during sending the message to controller %s",
exc.args[0])
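# Illustrative wiring (endpoint and driver names below are made up):
#   agent = Agent(conf, ["cpu_count"], use_nanoconfig_service=False,
#                 publisher_endpoint="tcp://127.0.0.1:12345",
#                 nanoconfig_service_endpoint=None,
#                 nanoconfig_update_endpoint=None,
#                 nanoconfig_profile=None)
#   agent.start()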
|
TzuChieh/Photon-v2
|
BlenderAddon/PhotonBlend/bmodule/common/__init__.py
|
Python
|
mit
| 73 | 0.013699 |
def mangled_node_tree_name(b_material):
    return "PH_" + b_material.name
|
fbcom/project-euler
|
018_maximum_path_sum_1.py
|
Python
|
mit
| 1,351 | 0.000741 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Maximum path sum I" – Project Euler Problem No. 18
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=18
#
def get_triangular_list(str):
ret = []
tmp = []
i = j = 1
for n in str.split():
tmp.append(int(n))
j = j + 1
if j > i:
ret.append(tmp)
tmp = []
j = 1
i = i + 1
return ret
def find_max_path(nums, row, col):
if row == len(nums):
return 0
n = nums[row][col]
a = n + find_max_path(nums, row + 1, col + 0)
b = n + find_max_path(nums, row + 1, col + 1)
return max(a, b)
# Testrun
pyramid = """
3
7 4
2 4 6
8 5 9 3
"""
tri = get_triangular_list(pyramid)
assert (23 == find_max_path(tri, 0, 0)), "Testcase failed"
# Solve
pyramid = """
75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23
"""
tri = get_triangular_list(pyramid)
print "Solution:", find_max_path(tri, 0, 0)
|
vulcansteel/autorest
|
AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyArray/auto_rest_swagger_bat_array_service/models/product.py
|
Python
|
mit
| 931 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Product(Model):
_required = []
    _attribute_map = {
'integer': {'key': 'integer', 'type': 'int'},
'string': {'key': 'string', 'type': 'str'},
}
def __init__(self, *args, **kwargs):
"""Product
:param int integer
:param str string
"""
self.integer = None
self.string = None
super(Product, self).__init__(*args, **kwargs)
|
markstoehr/structured_gaussian_mixtures
|
structured_gaussian_mixtures/mdn_experiment_one_ahead.py
|
Python
|
apache-2.0
| 5,886 | 0.002039 |
from __future__ import print_function, division
import cPickle
import gzip
import os
import sys
import timeit
import numpy
import theano
from theano import tensor
import mdn_one_ahead
# parameters
batch_size = 100
L1_reg=0.00
L2_reg=0.0001
n_epochs=200
learning_rate = 0.001
momentum = 0.9
sigma_in = 320
mixing_in = 320
n_components = 5
EPS = numpy.finfo(theano.config.floatX).eps
# load data
datasets = mdn_one_ahead.load_data()
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
X = train_set_x.get_value(borrow=True)[:20].copy()
Y = train_set_y.get_value(borrow=True)[:20].copy()
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
print( '... building the model')
# allocate symbolic variables for the data
index = tensor.lscalar() # index to a [mini]batch
x = tensor.matrix('x') # the data is presented as rasterized images
y = tensor.vector('y') # the labels are presented as 1D vector of
rng = numpy.random.RandomState(1234)
classifier = mdn_one_ahead.MLP(
rng=rng,
input=x,
n_in=320,
n_hiddens=[300, 300, 300, 300]
)
cost = (
classifier.negative_log_likelihood(y)
+ L2_reg * classifier.L2_sqr
)
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
gparams = [tensor.grad(cost, param) for param in classifier.params]
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)
]
model_gradients = theano.function(
inputs = [x, y], outputs=gparams)
train_gradients = theano.function(
inputs=[index],
outputs=gparams,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
print('... training')
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.99995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
                                      # minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
gs = train_gradients(minibatch_index)
if any(numpy.any(numpy.isnan(g)) for g in gs):
import pdb; pdb.set_trace()
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
            this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
                #improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print(('The code for file ' +
       os.path.split(__file__)[1] +
       ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
# l = 7.752, tanh, 3 components, 20 hid, 1 hidlayer,
# l = 5.057, relu, 3 components, (100, 100) hid
# l = 4.865, relu, 5 components, (150, 150, 150) hid
|
adafruit/micropython
|
ports/nrf/examples/ubluepy_scan.py
|
Python
|
mit
| 923 | 0.005417 |
from ubluepy import Scanner, constants
def bytes_to_str(bytes):
    string = ""
for b in bytes:
string += chr(b)
return string
def get_device_names(scan_entries):
dev_names = []
for e in scan_entries:
scan = e.getScanData()
if scan:
for s in scan:
if s[0] == constants.ad_types.AD_TYPE_COMPLETE_LOCAL_NAME:
dev_names.append((e, bytes_to_str(s[2])))
return dev_names
def find_device_by_name(name):
s = Scanner()
scan_res = s.scan(100)
|
device_names = get_device_names(scan_res)
for dev in device_names:
if name == dev[1]:
return dev[0]
# >>> res = find_device_by_name("micr")
# >>> if res:
# ... print("address:", res.addr())
# ... print("address type:", res.addr_type())
# ... print("rssi:", res.rssi())
# ...
# ...
# ...
# address: c2:73:61:89:24:45
# address type: 1
# rssi: -26
|
why2pac/dp-tornado
|
example/controller/tests/view/ui_methods/mmdd.py
|
Python
|
mit
| 1,216 | 0.004934 |
# -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
class MmddController(Controller):
def get(self):
self.model.tests.helper_test.datetime.switch_timezone('Asia/Seoul')
ts = 1451671445
ms = ts * 1000
dt = self.helper.datetime.convert(timestamp=ts)
args_dt = {'datetime': dt}
args_ms = {'timestamp': ms, 'ms': True}
args_ts = {'timestamp': ts}
args_dt_cc = {'datetime': dt, 'concat': ''}
args_ms_cc = {'timestamp': ms, 'ms': True, 'concat': '/'}
args_ts_cc = {'timestamp': ts, 'concat': '/'}
        assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_dt}) == '01.02')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ms}) == '01.02')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ts}) == '01.02')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_dt_cc}) == '0102')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ms_cc}) == '01/02')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ts_cc}) == '01/02')
|
tiborsimko/jsonalchemy
|
jsonalchemy/jsonext/functions/util_split.py
|
Python
|
gpl-2.0
| 1,313 | 0.000762 |
# -*- coding: utf-8 -*-
#
# This file is part of JSONAlchemy.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# JSONAlchemy is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
|
# License, or (at your option) any later version.
#
# JSONAlchemy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JSONAlchemy; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Function for tokenizing strings in models' files."""
def util_split(string, separator, index):
"""
Helper function to split safely a string and get the n-th element.
:param string: String to be split
:param separator:
:param index: n-th part of the split string to return
:return: The n-th part of the string or empty string in case of error
"""
string_splitted = string.split(separator)
try:
return string_splitted[index].strip()
except:
return ""
|
jonparrott/google-cloud-python
|
spanner/google/cloud/spanner_admin_database_v1/types.py
|
Python
|
apache-2.0
| 1,907 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api import http_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.iam.v1.logging import audit_data_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
from google.rpc import status_pb2
from google.api_core.protobuf_helpers import get_messages
from google.cloud.spanner_admin_database_v1.proto import (
spanner_database_admin_pb2)
_shared_modules = [
http_pb2,
iam_policy_pb2,
policy_pb2,
audit_data_pb2,
operations_pb2,
any_pb2,
descriptor_pb2,
empty_pb2,
timestamp_pb2,
status_pb2,
]
_local_modules = [
spanner_database_admin_pb2,
]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.spanner_admin_database_v1.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
|
WorldBank-Transport/DRIVER
|
app/black_spots/tasks/calculate_black_spots.py
|
Python
|
gpl-3.0
| 4,985 | 0.004814 |
import datetime
import os
import shutil
import tarfile
import tempfile
from django.conf import settings
from django.utils import timezone
from celery import shared_task
from celery.utils.log import get_task_logger
from grout.models import RecordType
from black_spots.tasks import (
forecast_segment_incidents,
load_blackspot_geoms,
load_road_network,
get_training_noprecip
)
from black_spots.tasks.get_segments import get_segments_shp, create_segments_tar
from black_spots.models import BlackSpotTrainingCsv, RoadSegmentsShapefile, BlackSpotConfig
from data.tasks.fetch_record_csv import export_records
logger = get_task_logger(__name__)
COMBINED_SEGMENTS_SHP_NAME = os.getenv('COMBINED_SEGMENTS_SHP_NAME', 'combined_segments.shp')
def get_latest_segments_tar_uuid(roads_srid, records_csv_obj_id):
cutoff = timezone.now() - datetime.timedelta(days=30)
segments_shp_obj = RoadSegmentsShapefile.objects.all().order_by('-created').first()
# Refresh road segments if the most recent one is more than 30 days out of date
if segments_shp_obj and segments_shp_obj.created > cutoff:
logger.info("Using existing RoadSegmentsShapefile")
return str(segments_shp_obj.uuid)
logger.info("Creating new RoadSegmentsShapefile")
logger.info("Loading road network")
lines_shp_path = load_road_network(output_srid='EPSG:{}'.format(roads_srid))
logger.info("Creating segments shape files")
shp_output_dir = get_segments_shp(lines_shp_path, records_csv_obj_id, roads_srid)
logger.info("Compressing shape files into tarball")
return create_segments_tar(shp_output_dir)
def get_forecast_csv_path(segments_shp_uuid, records_csv_obj_id, roads_srid):
# - Match events to segments shapefile
    blackspots_output = get_training_noprecip(
segments_shp_uuid,
records_csv_obj_id,
roads_srid
)
# - Run Rscript to output CSV
segments_csv = BlackSpotTrainingCsv.objects.get(pk=blackspots_output).csv.path
return forecast_segment_incidents(segments_csv, '/var/www/media/forecasts.csv')
@shared_task
def calculate_black_spots(history_length=datetime.timedelta(days=5 * 365 + 1), roads_srid=3395):
"""Integrates all black spot tasks into a pipeline
Args:
history_length (timedelta): Length of time to use for querying for historic records.
Note: the R script will fail if it doesn't have a certain
amount of data, which is why this is set to 5 years.
TODO: make the R script more robust, so it can handle a
dynamic number of years without failure.
roads_srid (int): SRID in which to deal with the Roads data
"""
try:
severity_percentile_threshold = (
BlackSpotConfig.objects.all().order_by('pk').first().severity_percentile_threshold
)
except AttributeError:
logger.warn('BlackSpots are not fully configured; set a percentile cutoff first.')
return
# Get the parameters we'll use to filter down the records we want
# Note that this assumes that the RecordType with this label to be used will also be marked as
# `active`. The `load_incidents` script ensures only the most recent record type is set as such.
record_type_pk = RecordType.objects.filter(
label=settings.BLACKSPOT_RECORD_TYPE_LABEL,
active=True
).first().pk
# - Get events CSV. This is obtained before the road network segments are calculated
# as an optimization, so we can ignore roads that won't have any associated records.
now = timezone.now()
oldest = now - history_length
records_csv_obj_id = export_records(
oldest,
now,
record_type_pk
)
# Get the UUID, since that is what is used when passing to tasks in the chain
segments_shp_uuid = get_latest_segments_tar_uuid(
roads_srid,
records_csv_obj_id
)
forecasts_csv = get_forecast_csv_path(
segments_shp_uuid,
records_csv_obj_id,
roads_srid
)
# - Load blackspot geoms from shapefile and CSV
# The shapefile is stored as a gzipped tarfile so we need to extract it
tar_output_dir = tempfile.mkdtemp()
try:
shp_tar = RoadSegmentsShapefile.objects.get(uuid=segments_shp_uuid).shp_tgz.path
with tarfile.open(shp_tar, "r:gz") as tar:
# TODO: Extract only the combined segments file, not the entire tarball
tar.extractall(tar_output_dir)
logger.info("Performing blackspot calculations")
segments_path = os.path.join(tar_output_dir, 'segments', COMBINED_SEGMENTS_SHP_NAME)
load_blackspot_geoms(
segments_path,
forecasts_csv,
record_type_pk,
roads_srid,
output_percentile=severity_percentile_threshold
)
finally:
shutil.rmtree(tar_output_dir)
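# Illustrative invocation from outside this module (assumes Celery is configured
# as elsewhere in the project):
#   calculate_black_spots.delay()           # enqueue asynchronously
#   calculate_black_spots(roads_srid=3395)  # or run synchronously for debugging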
|
sivertkh/gtrackcore
|
gtrackcore/test/track_operations/FlankTest.py
|
Python
|
gpl-3.0
| 3,882 | 0.000258 |
import unittest
import numpy as np
from collections import OrderedDict
from gtrackcore.metadata import GenomeInfo
from gtrackcore.track.core.GenomeRegion import GenomeRegion
from gtrackcore.track.format.TrackFormat import TrackFormat
from gtrackcore.track_operations.operations.Flank import Flank
from gtrackcore.track_operations.TrackContents import TrackContents
from gtrackcore.test.track_operations.OperationTest import createTrackView
class FlankTest(unittest.TestCase):
def setUp(self):
self.chr1 = (GenomeRegion('hg19', 'chr1', 0,
GenomeInfo.GENOMES['hg19']['size']['chr1']))
self.chromosomes = (GenomeRegion('hg19', c, 0, l)
for c, l in
GenomeInfo.GENOMES['hg19']['size'].iteritems())
def _runFlankSegmentsTest(self, starts, ends, expStarts, expEnds,
nrBP, after=True, before=True):
"""
Run a test on the creation of a Flank track from a segmented track.
        The test expects there to be segments only in chr1;
        all other chromosomes need to be of size zero.
        :param starts: Array of start positions in the track.
        :param ends: Array of end positions in the track.
        :param expStarts: Expected starts of flanks.
        :param expEnds: Expected ends of flanks.
        :param nrBP: INT. Size of flank in base pairs.
        :param after: Boolean. Create flanks after the segment ends.
        :param before: Boolean. Create flanks before the segment starts.
:return:
"""
        track = self._createTrackContent(starts, ends)
f = Flank(track)
        # Result track type is Segments as default
f.setFlankSize(nrBP)
f.setAfter(after)
f.setBefore(before)
tc = f()
for (k, v) in tc.getTrackViews().items():
print expStarts
print v.startsAsNumpyArray()
print expEnds
print v.endsAsNumpyArray()
if cmp(k, self.chr1) == 0:
# All test tracks are in chr1
self.assertTrue(np.array_equal(v.startsAsNumpyArray(),
expStarts))
self.assertTrue(np.array_equal(v.endsAsNumpyArray(), expEnds))
else:
                # Tests that all tracks not in chr1 have a size of 0.
self.assertEqual(v.startsAsNumpyArray().size, 0)
self.assertEqual(v.endsAsNumpyArray().size, 0)
def _createTrackContent(self, starts, ends):
"""
        Create a track view from a pair of start and end lists.
        Helper method used in testing. This method creates an hg19 track with
        data in chromosome 1 only.
:param starts: List of track start positions
:param ends: List of track end positions
:return: A TrackContent object
"""
starts = np.array(starts)
ends = np.array(ends)
tv = createTrackView(region=self.chr1, startList=starts, endList=ends,
allow_overlap=False)
d = OrderedDict()
d[self.chr1] = tv
return TrackContents('hg19', d)
# **** Points tests ****
# **** Segments tests ****
def testFlankSimpleBefore(self):
"""
Simple single segment before.
:return: None
"""
self._runFlankSegmentsTest(starts=[100], ends=[150], expStarts=[50],
expEnds=[100], nrBP=50, after=False,
before=True)
def testFlankSimpleAfter(self):
"""
Simple single segment after.
:return: None
"""
self._runFlankSegmentsTest(starts=[100], ends=[150], expStarts=[150],
expEnds=[200], nrBP=50, after=True,
before=False)
if __name__ == "__main__":
unittest.main()
|
Dr762/PythonExamples3.4
|
Python3/restraunt_finder.py
|
Python
|
gpl-2.0
| 6,651 | 0.001504 |
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
__author__ = "alex"
__date__ = "$Jun 1, 2015 10:46:55 PM$"
from math import radians, sin, cos, sqrt, asin
from bs4 import BeautifulSoup
from types import SimpleNamespace
import urllib.request
import urllib.parse
import json
class Restraunt:
def __init__(self, name, address, last_inspection, category):
self.name = name
self.address = address
self.last_inspection = last_inspection
self.category = category
scheme_host = "http://www.healthspace.com"
vdh_detail_translate = {
'Phone Number:': 'phone_number',
'Facility Type:': 'facility_type',
'# of Priority Foundation Items on Last Inspection:': 'priority_foundation_items',
'# of Priority Items on Last Inspection:': 'prioirty_items',
'# of Core Items on Last Inspection:': 'core_items',
'# of Critical Violations on Last Inspection:': 'critical_items',
'# of Non-Critical Violations on Last Inspection:': 'non_critical_items'
}
MI = 3959
NM = 3440
KM = 6371
def harversine(point1, point2, R=KM):
    lat_1, lon_1 = point1
lat_2, lon_2 = point2
delta_lat = radians(lat_2 - lat_1)
delta_lon = radians(lon_2 - lon_1)
lat_1 = radians(lat_1)
lat_2 = radians(lat_2)
a = sin(delta_lat / 2) ** 2 + cos(lat_1) * cos(lat_2) * sin(delta_lon / 2) ** 2
c = 2 * asin(sqrt(a))
return R * c
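# Minimal usage sketch of the harversine() helper above (the coordinates are
# arbitrary illustration values near Norfolk, VA, not taken from the data below).
# The default radius KM yields kilometres; pass R=MI for miles.
print(harversine((36.8508, -76.2859), (36.8468, -76.2852)))  # roughly 0.45 km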
def get_food_list_by_name(): # get all restraunts
    path = "/Clients/VDH/Norfolk/Norolk_Website.nsf/Food-List-ByName"
form = {
"OpenView": "",
"RestrictToCategory": "faa4e68b1bbbb48f008d02bf09dd656f",
"count": "400",
"start": "1",
}
query = urllib.parse.urlencode(form)
with urllib.request.urlopen(scheme_host + path + "?" + query) as data:
soup = BeautifulSoup(data.read())
return soup
def food_table_iter(soup):
"""Columns are 'Name', '' , 'Facility Location', 'Last Inspection',
Plus an unnamed column with a RestrictToCategory key"""
table = soup.html.body.table
for row in table.find_all("tr"):
columns = [td.text.strip() for td in row.find_all("td")]
for td in row.find_all("td"):
if td.a:
url = urllib.parse.urlparse(td.a["href"])
form = urllib.parse.parse_qs(url.query)
columns.append(form['RestrictToCategory'][0])
yield columns
def food_row_iter(table_iter):
heading = next(table_iter)
for row in table_iter:
yield Restraunt(name=row[0], address=row[2], last_inspection=row[3], category=row[4])
def geocode_detail(business):
form = {
"address": business.address + ", Norfolk, VA",
"sensor": "false",
}
query = urllib.parse.urlencode(form, safe=",")
scheme_netloc_path = "http://maps.googleapis.com/maps/api/geocode/json"
with urllib.request.urlopen(scheme_netloc_path + "?" + query) as geocode:
response = json.loads(geocode.read().decode("UTF-8"))
lat_lon = response['results'][0]['geometry']['location']
business.latitude = lat_lon['lat']
business.longitude = lat_lon['lng']
return business
def get_food_facility_history(cat_key):
url_detail = "/Clients/VDH/Norfolk/Norolk_Website.nsf/Food-FacilityHistory"
form = {
"OpenView": "",
"RestrictToCategory": cat_key
}
query = urllib.parse.urlencode(form)
with urllib.request.urlopen(scheme_host + url_detail + "?" + query) as data:
soup = BeautifulSoup(data.read())
return soup
def inspection_detail(business):
soup = get_food_facility_history(business.category)
business.name2 = soup.body.h2.text.strip()
table = soup.body.table
for row in table.find_all("tr"):
column = list(row.find_all("td"))
name = column[0].text.strip()
value = column[1].text.strip()
setattr(business, vdh_detail_translate[name], value)
return business
def get_chicago_json():
form = {
"accessType": "DOWNLOAD",
"$where": "inspection_date>2015-01-01",
}
query = urllib.parse.urlencode(form)
scheme_netloc_path = "https://data.cityofgchicago.org/api/views/4ijn-s7e5/rows.json"
with urllib.request.urlopen(scheme_netloc_path + "?" + query) as data:
with open("chicago_data.json", "w") as output:
output.write(data.read())
def food_row_iter(): # create object from json
with open("chicago_data.json", encoding="UTF-8") as data_file:
inspections = json.load(data_file)
headings = [item['fieldName'] for item in inspections["meta"]["view"]["columns"]]
for row in inspections["data"]:
data = SimpleNamespace(**dict(zip(headings, row)))
yield data
def parse_details(business):
business.latitude = float(business.latitude)
business.longitude = float(business.longitude)
if business.violations is None:
business.details = []
else:
        business.details = [v.strip() for v in business.violations.split("|")]
return business
def choice_iter_norfolk():
n_base = SimpleNamespace(address='333 Waterside Drive')
geocode_detail(n_base)
print(n_base)
soup = get_food_list_by_name()
for row in food_row_iter():
for row in food_table_iter(soup):
geocode_detail(row)
inspection_detail(row)
row.distance = harversine((row.latitude, row.longitude), (n_base.latitude, n_base.longitude))
yield row
def choice_iter_chicago():
c_base = SimpleNamespace(address='3420 W GRACE ST')
geocode_detail(c_base)
print(c_base)
for row in food_row_iter():
try:
parse_details(row)
row.distance = harversine((row.latitude, row.longitude),
(c_base.latitude, c_base.longitude))
yield row
except TypeError:
pass
# main code of the app
soup = get_food_list_by_name()
raw_column = food_table_iter(soup)
for business in choice_iter_norfolk():
print('name ', business.name, ' address ', business.address, ' lat ', business.latitude, ' lon ',
business.longitude,
' phone ', business.phone_number, ' type ', business.facility_type)
get_chicago_json()
for business in choice_iter_chicago():
print('name ', business.dba_name, ' address ', business.address, ' lat ', business.latitude, ' lon ',
business.longitude,
' phone ', business.phone_number, ' type ', business.facility_type, ' results ', business.results)
|
Comunitea/CMNT_00040_2016_ELN_addons
|
eln_reports/report/invoice/invoice_report_parser.py
|
Python
|
agpl-3.0
| 2,501 | 0.0012 |
# -*- coding: utf-8 -*-
# Copyright 2021 El Nogal - Pedro Gómez <pegomez@elnogal.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import registry
from openerp.addons import jasper_reports
def parser(cr, uid, ids, data, context):
parameters = {}
name = 'report.invoice_report_jasper'
model = 'account.invoice'
    data_source = 'model'
uom_obj = registry(cr.dbname).get('product.uom')
invoice_obj = registry(cr.dbname).get('account.invoice')
invoice_ids = invoice_obj.browse(cr, uid, ids, context)
language = list(set(invoice_ids.mapped('partner_id.lang')))
if len(language) == 1:
context['lang'] = language[0]
invoice_lines_ids = {}
for invoice_id in invoice_ids:
language = invoice_id.partner_id.lang or 'es_ES'
        invoice_lines_ids[str(invoice_id.id)] = []
for line in invoice_id.invoice_line:
product_id = line.product_id.with_context(lang=language)
uom_id = product_id.uom_id
uos_id = line.uos_id.with_context(lang=language)
uos_qty = line.quantity
uom_qty = uom_obj._compute_qty(cr, uid, uos_id.id, uos_qty, uom_id.id)
price_unit = line.price_unit
if uos_id and uos_id != uom_id:
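                # The line price is expressed per sale unit (uos); rescale it to the
                # stock unit (uom) so that price_unit * uom_qty still reproduces the
                # original line amount.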
price_unit = line.price_unit * uos_qty / uom_qty
vals = {
'invoice_id': invoice_id.id,
'prod_code': product_id.default_code or '',
'prod_ean13': product_id.ean13 or '',
'prod_name': line.name or product_id.name or '',
'origin': line.origin or '',
'client_order_ref': line.stock_move_id.picking_id.client_order_ref or '',
'uom_qty': uom_qty,
'uos_qty': uos_qty,
'uom_name': uom_id.name or '',
'uos_name': uos_id.name or uom_id.name or '',
'price_unit': price_unit or 0.0,
'discount': line.discount or 0.0,
'price_subtotal': line.price_subtotal,
'taxes': line.tax_str or '',
}
invoice_lines_ids[str(invoice_id.id)].append(vals)
parameters['invoice_lines_ids'] = invoice_lines_ids
return {
'ids': ids,
'name': name,
'model': model,
'records': [],
'data_source': data_source,
'parameters': parameters,
}
jasper_reports.report_jasper('report.invoice_report_jasper', 'account.invoice', parser)
|
HelloLily/hellolily
|
lily/cases/migrations/0019_auto_20170418_1243.py
|
Python
|
agpl-3.0
| 425 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
('cases', '0018_auto_20170418_1220'),
]
operations = [
        migrations.AlterField(
model_name='case',
name='type',
field=models.ForeignKey(related_name='cases', to='cases.CaseType'),
),
]
|
PaulGregor/crowdin-cli
|
crowdin/methods.py
|
Python
|
mit
| 19,747 | 0.003393 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
try:
from crowdin.connection import Connection, Configuration
except ImportError:
from connection import Connection, Configuration
import six
import logging
import json
import zipfile
import shutil
import io
import os
logger = logging.getLogger('crowdin')
class Methods:
def __init__(self, any_options, options_config):
# Get options arguments from console input
self.any_options = any_options
# Get parsed config file
self.options_config = options_config
self.project_info = {}
self.languages_list = []
# Main connection method to interact with connection.py
def true_connection(self, url, params, api_files=None, additional_parameters=None):
return Connection(self.options_config, url, params, api_files, self.any_options,
additional_parameters).connect()
def get_info(self):
# POST https://api.crowdin.com/api/project/{project-identifier}/info?key={project-key}
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/info', 'url_par4': True}
params = {'json': 'json'}
self.project_info = json.loads(self.true_connection(url, params).decode())
def get_info_files(self):
if not self.project_info:
self.get_info()
return self.project_info['files']
def get_info_lang(self):
if not self.project_info:
self.get_info()
return self.project_info['languages']
def get_info_branches(self):
if not self.project_info:
self.get_info()
branches = set()
for item in self.project_info['files']:
if item['node_type'] == 'branch':
branches.add(item['name'])
return branches
def lang(self):
if not self.languages_list:
data = json.loads(self.supported_languages().decode())
my_lang = self.get_info_lang()
for i in data:
for l in my_lang:
if i['crowdin_code'] == l['code']:
self.languages_list.append(i)
return self.languages_list
def parse(self, data, parent='', branch=False):
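        # Walks the file tree returned by the project info API and yields the full
        # remote path of every leaf (files, and directories that are empty). When
        # `branch` is given, only nodes under that branch are visited and the branch
        # name is dropped from the yielded paths.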
if data is None or not len(data):
yield parent + ('/' if data is not None and not len(data) else '')
else:
if branch:
for node in data:
if node.get('node_type') == 'branch' and node.get('name') == branch:
# remove branch name from files hierarchy
for result in self.parse(node.get('files'), parent, branch=False):
yield result
else:
for node in data:
if node.get('node_type') != 'branch':
for result in self.parse(node.get('files'), parent + '/' + node.get('name')):
yield result
def create_directory(self, name, is_branch=False):
# POST https://api.crowdin.net/api/project/{project-identifier}/add-directory?key={project-key}
logger.info("Creating remote {type} {name}".format(name=name, type='directory' if not is_branch else 'branch'))
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/add-directory', 'url_par4': True}
params = {'name': name, 'json': 'json'}
if is_branch:
params['is_branch'] = 1
if self.any_options.branch and not is_branch:
params['branch'] = self.any_options.branch
return self.true_connection(url, params)
def upload_files(self, files, export_patterns, parameters, item):
# POST https://api.crowdin.com/api/project/{project-identifier}/add-file?key={project-key}
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/add-file', 'url_par4': True}
if item[0] == '/':
sources = item[1:]
else:
sources = item
params = {'json': 'json', 'export_patterns[{0}]'.format(sources): export_patterns,
'titles[{0}]'.format(sources): parameters.get('titles'),
'type': parameters.get('type'),
'first_line_contains_header': parameters.get('first_line_contains_header'),
'scheme': parameters.get('scheme'), 'translate_content': parameters.get('translate_content'),
'translate_attributes': parameters.get('translate_attributes'),
'content_segmentation': parameters.get('content_segmentation'),
'translatable_elements': parameters.get('translatable_elements'),
'escape_quotes': parameters.get('escape_quotes', '3')}
if self.any_options.branch:
params['branch'] = self.any_options.branch
additional_parameters = {'file_name': sources, 'action_type': "Uploading"}
try:
with open(files, 'rb') as f:
                api_files = {'files[{0}]'.format(sources): f}
return self.true_connection(url, params, api_files, additional_parameters)
except(OSError, IOError) as e:
print(e, "\n Skipped")
def update_files(self, files, export_patterns, parameters, item):
        # POST https://api.crowdin.com/api/project/{project-identifier}/update-file?key={project-key}
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/update-file', 'url_par4': True}
if item[0] == '/':
sources = item[1:]
else:
sources = item
params = {'json': 'json', 'export_patterns[{0}]'.format(sources): export_patterns,
'titles[{0}]'.format(sources): parameters.get('titles'),
'first_line_contains_header': parameters.get('first_line_contains_header'),
'scheme': parameters.get('scheme'),
'update_option': parameters.get('update_option'),
'escape_quotes': parameters.get('escape_quotes', '3')}
if self.any_options.branch:
params['branch'] = self.any_options.branch
additional_parameters = {'file_name': sources, 'action_type': "Updating"}
try:
with open(files, 'rb') as f:
api_files = {'files[{0}]'.format(sources): f}
# print files
return self.true_connection(url, params, api_files, additional_parameters)
except(OSError, IOError) as e:
print(e, "\n Skipped")
def upload_translations_files(self, translations, language, source_file):
# POST https://api.crowdin.com/api/project/{project-identifier}/upload-translation?key={project-key
url = dict(post='POST', url_par1='/api/project/', url_par2=True, url_par3='/upload-translation', url_par4=True)
options_dict = vars(self.any_options)
params = {'json': 'json', 'language': language,
'auto_approve_imported': options_dict.get('imported', '0'),
'import_eq_suggestions': options_dict.get('suggestions', '0'),
'import_duplicates': options_dict.get('duplicates', '0')}
if self.any_options.branch:
params['branch'] = self.any_options.branch
additional_parameters = {'file_name': source_file, 't_l': language, 'action_type': "translations"}
try:
with open(translations, 'rb') as f:
api_files = {'files[{0}]'.format(source_file): f}
# print files
return self.true_connection(url, params, api_files, additional_parameters)
except(OSError, IOError) as e:
print(e, "\n Skipped")
def preserve_hierarchy(self, common_path):
common_path = [i[1:] if i[:1] == '/' and i.count('/') == 1 else i for i in common_path]
preserve_hierarchy = Configuration(self.options_config).preserve_hierarchy
if preserve_hierarchy is False:
for i in common_path:
if i.count('
|
bourneagain/pythonBytes
|
cloneGraph_BFS.py
|
Python
|
mit
| 1,080 | 0.012037 |
class UndirectedGraphNode:
def __init__(self, x):
self.label = x
self.neighbors = []
#using DFS
class Solution:
# @param node, a undirected graph node
# @return a undirected graph node
def cloneGraph(self, node):
seen={}
visited=[]
seen[None] = None
head = UndirectedGraphNode(node.label)
seen[node] = head
visited.append(node)
while len(visited) != 0:
refNode = visited.pop()
for n in refNode.neighbors:
if n not in seen:
neighBorNode = UndirectedGraphNode(n.label)
seen[refNode].neighbors.append(neighBorNode)
seen[n] = neighBorNode
visited.append(n)
else:
seen[refNode].neighbors.append(seen[n])
return head
A=UndirectedGraphNode(2)
B=UndirectedGraphNode(3)
C=UndirectedGraphNode(4)
A.neighbors.append(B)
A.neighbors.append(C)
B.neighbors.append(C)
N=Solution()
for i in N.cloneGraph(A).neighbors:
print i.label
|
tsileo/blobstash-python-docstore
|
blobstash/docstore/query.py
|
Python
|
mit
| 4,218 | 0.000711 |
"""Query utils."""
class LuaScript:
def __init__(self, script):
self.script = script
class LuaStoredQuery:
def __init__(self, name, query_args):
self.name = name
self.args = query_args
class LogicalOperator:
def __init__(self, *args):
self.clauses = args
def __str__(self):
return " {} ".format(self.OPERATOR).join(
[str(clause) for clause in self.clauses]
)
class Or(LogicalOperator):
OPERATOR = "or"
class And(LogicalOperator):
OPERATOR = "and"
class Not:
def __init__(self, clause):
self.clause = clause
def __str__(self):
return "not ({})".format(str(self.clause))
def _lua_repr(value):
if isinstance(value, bytes):
return repr(value.decode("utf-8"))
elif isinstance(value, bool):
if value:
return "true"
return "false"
elif isinstance(value, str):
return repr(value)
elif isinstance(value, (float, int)):
return value
elif isinstance(value, type(None)):
return "nil"
# XXX(tsileo): should `dict`/`list` be supported?
else:
raise ValueError("unsupported data type: {}".format(type(value)))
class LuaShortQuery:
def __init__(self, key, value, operator):
self.key = key
self.value = value
self.operator = operator
def query(self):
return "match(doc, '{}', '{}', {})".format(self.key, self.operator, self.value)
def __str__(self):
return self.query()
class LuaShortQueryComplex:
def __init__(self, query):
self.query = query
def __str__(self):
return self.query
class _MetaQuery(type):
def __getitem__(cls, key):
if isinstance(key, int):
return cls("[{}]".format(key + 1))
return cls(".{}".format(key))
class Q(metaclass=_MetaQuery):
"""Allow for query:
>>> Q['persons_count'] > 5
>>> Q['persons'][0]['name'] == 'thomas'
>>> Q['l'].contains(10)
>>> Q['persons'].contains(Q['name'] == 'thomas')
"""
def __init__(self, path=None):
self._path = path or ""
def __getitem__(self, key):
if isinstance(key, int):
self._path = self._path + "[{}]".format(key + 1)
return self
self._path = self._path + ".{}".format(key)
return self
def path(self):
return self._path[1:]
def __repr__(self):
return "Q(path={})".format(self._path)
def any(self, values):
return LuaShortQueryComplex(
" or ".join(
[
"get_path(doc, '{}') == {}".format(self.path(), _lua_repr(value))
for value in values
]
)
)
def not_any(self, values):
return LuaShortQueryComplex(
" or ".join(
[
"get_path(doc, '{}') ~= {}".format(self.path(), _lua_repr(value))
for value in values
]
)
)
def contains(self, q):
if isinstance(q, LuaShortQuery):
if q.operator != "EQ":
raise ValueError("contains only support pure equality query")
return LuaShortQueryComplex(
"in_list(doc, '{}', {}, '{}')".format(
self.path(), _lua_repr(q.value), q.key
)
)
elif isinstance(q, LuaShortQueryComplex):
raise ValueError("query too complex to use in contains")
return LuaShortQueryComplex(
"in_list(doc, '{}', {})".format(self.path(), _lua_repr(q))
)
def __eq__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "EQ")
def __ne__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "NE")
def __lt__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "LT")
def __le__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "LE")
def __ge__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "GE")
def __gt__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "GT")
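# Minimal usage sketch of the query builder above; this block is an illustrative
# addition, and the match()/get_path()/in_list() helpers shown in the output are
# assumed to be provided by the BlobStash server-side Lua environment.
if __name__ == "__main__":
    print(str(Q["persons_count"] > 5))               # match(doc, 'persons_count', 'GT', 5)
    print(str(Q["persons"][0]["name"] == "thomas"))  # match(doc, 'persons[1].name', 'EQ', 'thomas')
    print(str(Q["l"].contains(10)))                  # in_list(doc, 'l', 10)
    print(str(And(Q["stars"] >= 4, Not(Q["archived"] == True))))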
|
bfontaine/wptranslate
|
tests/test_mediawiki.py
|
Python
|
mit
| 1,013 | 0.002962 |
# -*- coding: UTF-8 -*-
import responses
from helpers import TestCase
from wptranslate.mediawiki import query
class TestMediaWiki(TestCase):
def setUp(self):
self.lang = 'foo'
self.url = 'https://%s.wikipedia.org/w/api.php' % self.lang
@responses.activate
def test_query_return_none_on_error(self):
responses.add(responses.GET, self.url, body='{}', status=404)
self.assertNone(query({}, lang=self.lang))
self.assertEquals(1, len(responses.calls))
@responses.activate
def test_query_return_none_on_wrong_resp(self):
responses.add(responses.GET, self.url, body='{}', status=200)
        self.assertNone(query({}, lang=self.lang))
self.assertEquals(1, len(responses.calls))
@responses.activate
def test_query_return_query_param(self):
responses.add(responses.GET, self.url, body='{"query": 42}', status=200)
self.assertEquals(42, query({}, lang=self.lang))
self.assertEquals(1, len(responses.calls))
| |
nmgeek/npTDMS
|
nptdms/tdmsinfo.py
|
Python
|
lgpl-3.0
| 1,875 | 0 |
from __future__ import print_function
from argparse import ArgumentParser
import logging
from nptdms import tdms
def main():
parser = ArgumentParser(
description="List the contents of a LabView TDMS file.")
parser.add_argument(
'-p', '--properties', action="store_true",
help="Include channel properties.")
parser.add_argument(
'-d', '--debug', action="store_true",
help="Print debugging information to stderr.")
parser.add_argument(
'tdms_file',
        help="TDMS file to read.")
args = parser.parse_args()
if args.debug:
logging.getLogger(tdms.__name__).setLevel(logging.DEBUG)
tdmsfile = tdms.TdmsFile(args.tdms_file)
level = 0
root = tdmsfile.object()
display('/', level)
if args.properties:
display_properties(root, level)
for group in tdmsfile.groups():
level = 1
try:
group_obj = tdmsfile.object(group)
display("%s" % group_obj.path, level)
if args.properties:
display_properties(group_obj, level)
except KeyError:
# It is possible to have a group without an object
display("/'%s'" % group, level)
for channel in tdmsfile.group_channels(group):
level = 2
display("%s" % channel.path, level)
if args.properties:
level = 3
if channel.data_type is not None:
display("data type: %s" % channel.data_type.name, level)
display_properties(channel, level)
def display_properties(tdms_object, level):
if tdms_object.properties:
display("properties:", level)
for prop, val in tdms_object.properties.items():
display("%s: %s" % (prop, val), level)
def display(s, level):
print("%s%s" % (" " * 2 * level, s))
|
dennissergeev/pyveccalc
|
pyveccalc/__init__.py
|
Python
|
mit
| 314 | 0 |
# -*- coding: utf-8 -*-
"""
Wind vector calculations in finite differences
"""
from . import iris_api
from . import standard
from . import tools
from . import utils
# List to define the behaviour of imports of the form:
# from pyveccalc import *
__all__ = []
# Package version number.
__version__ = '0.2.9'
|
xpharry/Udacity-DLFoudation
|
tutorials/reinforcement/gym/gym/tests/test_core.py
|
Python
|
mit
| 353 | 0.005666 |
from gym import core
class ArgumentEnv(core.Env):
calls = 0
def __init__(self, arg):
self.calls += 1
self.arg = arg
def test_env_instantiation():
    # This looks pretty trivial, but given our usage of
# __new__, it's worth having.
env = ArgumentEnv('arg')
assert env.arg == 'arg'
assert env.calls == 1
|
vjFaLk/frappe
|
frappe/patches/v9_1/add_sms_sender_name_as_parameters.py
|
Python
|
mit
| 651 | 0.021505 |
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("core", "doctype", "sms_parameter")
sms_sender_name = frappe.db.get_single_value("SMS Settings", "sms_sender_name")
if sms_sender_name:
frappe.reload_doc("core", "doctype", "sms_settings")
        sms_settings = frappe.get_doc("SMS Settings")
sms_settings.append("parameters", {
"parameter": "sender_name",
"value": sms_sender_name
})
sms_settings.flags.ignore_mandatory = True
sms_settings.flags.ignore_permissions = True
sms_settings.save()
|
ultrabug/uhashring
|
uhashring/ring.py
|
Python
|
bsd-3-clause
| 11,224 | 0.000178 |
from bisect import bisect
from uhashring.ring_ketama import KetamaRing
from uhashring.ring_meta import MetaRing
class HashRing:
"""Implement a consistent hashing ring."""
def __init__(self, nodes=[], **kwargs):
"""Create a new HashRing given the implementation.
:param nodes: nodes used to create the continuum (see doc for format).
:param hash_fn: use this callable function to hash keys, can be set to
'ketama' to use the ketama compatible implementation.
:param vnodes: default number of vnodes per node.
:param weight_fn: use this function to calculate the node's weight.
"""
hash_fn = kwargs.get("hash_fn", None)
vnodes = kwargs.get("vnodes", None)
weight_fn = kwargs.get("weight_fn", None)
if hash_fn == "ketama":
ketama_args = {k: v for k, v in kwargs.items() if k in ("replicas",)}
if vnodes is None:
vnodes = 40
self.runtime = KetamaRing(**ketama_args)
else:
if vnodes is None:
vnodes = 160
self.runtime = MetaRing(hash_fn)
self._default_vnodes = vnodes
self.hashi = self.runtime.hashi
if weight_fn and not hasattr(weight_fn, "__call__"):
raise TypeError("weight_fn should be a callable function")
self._weight_fn = weight_fn
if self._configure_nodes(nodes):
self.runtime._create_ring(self.runtime._nodes.items())
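    # Usage note (inferred from _configure_nodes below): `nodes` may be a single
    # hostname string, a list of hostnames, or a dict mapping a node name either to
    # an int weight or to a dict with any of the keys hostname/instance/nodename/
    # port/vnodes/weight.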
    def _configure_nodes(self, nodes):
"""Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
"""
if isinstance(nodes, str):
nodes = [nodes]
elif not isinstance(nodes, (dict, list)):
raise ValueError(
"nodes configuration should be a list or a dict,"
" got {}".format(type(nodes))
)
conf_changed = False
for node in nodes:
conf = {
"hostname": node,
"instance": None,
"nodename": node,
"port": None,
"vnodes": self._default_vnodes,
"weight": 1,
}
current_conf = self.runtime._nodes.get(node, {})
nodename = node
# new node, trigger a ring update
if not current_conf:
conf_changed = True
# complex config
if isinstance(nodes, dict):
node_conf = nodes[node]
if isinstance(node_conf, int):
conf["weight"] = node_conf
elif isinstance(node_conf, dict):
for k, v in node_conf.items():
if k in conf:
conf[k] = v
# changing those config trigger a ring update
if k in ["nodename", "vnodes", "weight"]:
if current_conf.get(k) != v:
conf_changed = True
else:
raise ValueError(
"node configuration should be a dict or an int,"
" got {}".format(type(node_conf))
)
if self._weight_fn:
conf["weight"] = self._weight_fn(**conf)
# changing the weight of a node trigger a ring update
if current_conf.get("weight") != conf["weight"]:
conf_changed = True
self.runtime._nodes[nodename] = conf
return conf_changed
def __delitem__(self, nodename):
"""Remove the given node.
:param nodename: the node name.
"""
self.runtime._remove_node(nodename)
remove_node = __delitem__
def __getitem__(self, key):
"""Returns the instance of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "instance")
get_node_instance = __getitem__
def __setitem__(self, nodename, conf={"weight": 1}):
"""Add the given node with its associated configuration.
:param nodename: the node name.
:param conf: the node configuration.
"""
if self._configure_nodes({nodename: conf}):
self.runtime._create_ring([(nodename, self._nodes[nodename])])
add_node = __setitem__
def _get_pos(self, key):
"""Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
"""
p = bisect(self.runtime._keys, self.hashi(key))
if p == len(self.runtime._keys):
return 0
else:
return p
def _get(self, key, what):
"""Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
"""
if not self.runtime._ring:
return None
pos = self._get_pos(key)
if what == "pos":
return pos
nodename = self.runtime._ring[self.runtime._keys[pos]]
if what in ["hostname", "instance", "port", "weight"]:
return self.runtime._nodes[nodename][what]
elif what == "dict":
return self.runtime._nodes[nodename]
elif what == "nodename":
return nodename
elif what == "tuple":
return (self.runtime._keys[pos], nodename)
def get(self, key):
"""Returns the node object dict matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "dict")
def get_instances(self):
"""Returns a list of the instances of all the configured nodes."""
return [
c.get("instance") for c in self.runtime._nodes.values() if c.get("instance")
]
def get_key(self, key):
"""Alias of ketama hashi method, returns the hash of the given key.
This method is present for hash_ring compatibility.
:param key: the key to look for.
"""
return self.hashi(key)
def get_node(self, key):
"""Returns the node name of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "nodename")
def get_node_hostname(self, key):
"""Returns the hostname of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "hostname")
def get_node_port(self, key):
"""Returns the port of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "port")
def get_node_pos(self, key):
"""Returns the index position of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "pos")
def get_node_weight(self, key):
"""Returns the weight of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "weight")
def get_nodes(self):
"""Returns a list of the names of all the configured nodes."""
return self.runtime._nodes.keys()
def get_points(self):
"""Returns a ketama compatible list of (position, nodename) tuples."""
return [(k, self.runtime._ring[k]) for k in self.runtime._keys]
def get_server(self, key):
"""Returns a ketama compatible (position, nodename) tuple.
:param key: the key to look for.
"""
return self._get(key, "tuple")
de
|
gammu/wammu
|
Wammu/App.py
|
Python
|
gpl-3.0
| 2,021 | 0.000993 |
# -*- coding: UTF-8 -*-
#
# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>
#
# This file is part of Wammu <https://wammu.eu/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
'''
Wammu - Phone manager
Main Wammu application
'''
from __future__ import unicode_literals
from __future__ import print_function
import wx
import sys
import Wammu.Main
import Wammu.Error
from Wammu.Locales import StrConv
from Wammu.Locales import ugettext as _
class WammuApp(wx.App):
'''
Wammu appliction class, it initializes wx and creates main Wammu window.
'''
def OnInit(self):
'''
wxWindows call this method to initialize the application.
'''
        self.locale = wx.Locale(wx.LANGUAGE_DEFAULT)
self.SetAppName('Wammu')
vendor = StrConv('Michal Čihař')
if vendor.find('?') != -1:
vendor = 'Michal Čihař'
self.SetVendorName(vendor)
frame = Wammu.Main.WammuFrame(None, -1)
Wammu.Error.HANDLER_PARENT = frame
frame.Show(True)
frame.PostInit(self)
        self.SetTopWindow(frame)
# Return a success flag
return True
def Run():
'''
Wrapper to execute Wammu. Installs graphical error handler and launches
WammuApp.
'''
try:
sys.excepthook = Wammu.Error.Handler
except:
print(_('Failed to set exception handler.'))
app = WammuApp()
app.MainLoop()
|
jordan9001/CABS
|
Source/Broker/CABS_server.py
|
Python
|
apache-2.0
| 32,754 | 0.009007 |
#!/usr/bin/python
## CABS_Server.py
# This is the webserver that is at the center of the CABS system.
# It is asynchronous, and as such the callbacks and function flow can be a bit confusing
# The basic idea is that the HandleAgentFactory and HandleClientFactory make new HandleAgents and HandleClients
# There is one per connection, and it processes all communication on that connection, without blocking
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import ssl, reactor, endpoints, defer, task
from twisted.protocols.basic import LineOnlyReceiver
from twisted.protocols.policies import TimeoutMixin
from twisted.enterprise import adbapi
from twisted.names import client
from twisted.python import log
import ldap
import sys
import logging
import random
import os
from time import sleep
#global settings dictionary
settings = {}
#global database pool
dbpool = adbapi.ConnectionPool
#global blacklist set
blacklist = set()
#make a logger
logger=logging.getLogger()
random.seed()
## Handles each Agent connection
class HandleAgent(LineOnlyReceiver, TimeoutMixin):
def __init__(self, factory):
self.factory = factory
#timeout after 9 seconds
self.setTimeout(9)
def connectionMade(self):
self.agentAddr = self.transport.getPeer()
logger.debug('Connection made with {0}'.format(self.agentAddr))
self.factory.numConnections = self.factory.numConnections + 1
logger.debug('There are {0} Agent connections'.format(self.factory.numConnections))
def connectionLost(self, reason):
logger.debug('Connection lost with {0} due to {1}'.format(self.agentAddr,reason))
self.factory.numConnections = self.factory.numConnections - 1
logger.debug('There are {0} Agent connections'.format(self.factory.numConnections))
def lineLengthExceeded(self, line):
logger.error('Agent at {0} exceeded the Line Length'.format(self.agentAddr))
self.transport.abortConnection()
def lineReceived(self, line):
#types of reports = status report (sr) and status process report (spr)
report = line.split(':')
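        # Example lines, inferred from the parsing below (illustrative, not from the
        # original source): "sr:machine42:alice:bob" or "spr:<process><code>:machine42:alice",
        # where <code> is -1/0/1/2/3 and is translated to a status string just below.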
if report[0] == 'sr' or report[0] == 'spr':
status = None
if report[0] == 'spr':
status = report.pop(1)
if status.endswith('-1'):
status = status.rstrip('-1') + ' : Unknown'
elif status.endswith('0'):
status = status.rstrip('0') + ' : Not Found'
elif status.endswith('1'):
status = status.rstrip('1') + ' : Not Running'
elif status.endswith('2'):
status = status.rstrip('2') + ' : Not Connected'
elif status.endswith('3'):
status = status.rstrip('3') + ' : Okay'
logger.debug("The process on {0} is {1}".format(report[1], status))
logger.debug('There are {0} users on {1}'.format(len(report)-2, report[1]))
#Mark the machine as active, and update timestamp
querystring = "UPDATE machines SET active = True, last_heartbeat = NOW(), status = %s WHERE machine = %s"
r1 = dbpool.runQuery(querystring, (status, report[1]))
#confirm any users that reserved the machine if they are there, or unconfirm them if they are not
#For now we don't support assigning multiple users per machine, so only one should be on at a time
#but, if we do have multiple, let it be so
#Try to write an entry under the first listed users name, if duplicate machine update the old entry to confirmed
users = ''
if len(report) > 2:
for item in range(2, len(report)):
users += report[item] + ', '
users = users[0:-2]
logger.info("Machine {0} reports user {1}".format(report[1],users))
regexstr = ''
for item in range(2, len(report)):
regexstr += '(^'
regexstr += report[item]
regexstr += '$)|'
regexstr = regexstr[0:-1]
if settings.get("One_Connection") == 'True' or settings.get("One_Connection") == True:
querystring = "INSERT INTO current VALUES (%s, NULL, %s, True, NOW()) ON DUPLICATE KEY UPDATE confirmed = True, connecttime = Now(), user = %s"
r2 = dbpool.runQuery(querystring,(report[2],report[1],users))
querystring = "UPDATE current SET confirmed = True, connecttime = Now() WHERE (machine = %s AND user REGEXP %s)"
r2 = dbpool.runQuery(querystring,(report[1],regexstr))
else:
querystring = "DELETE FROM current WHERE machine = %s"
r2 = dbpool.runQuery(querystring, (report[1],))
## Creates a HandleAgent for each connection
class HandleAgentFactory(Factory):
def __init__(self):
self.numConnections = 0
def buildProtocol(self, addr):
#Blacklist check here
if addr.host in blacklist:
logger.debug("Blacklisted address {0} tried to connect".format(addr.host))
protocol = DoNothing()
protocol.factory = self
return protocol
#limit connection number here
if (settings.get("Max_Agents") is not None and settings.get("Max_Agents") != 'None') and (int(self.numConnections) >= int(settings.get("Max_Agents"))):
logger.warning("Reached maximum Agent connections")
protocol = DoNothing()
protocol.factory = self
return protocol
return HandleAgent(self)
## Handles each Client Connection
class HandleClient(LineOnlyReceiver, TimeoutMixin):
def __init__(self, factory):
self.factory = factory
self.setTimeout(9)#set timeout of 9 seconds
def connectionMade(self):
#if auto then add to blacklist
self.clientAddr = self.transport.getPeer()
logger.debug('Connection made with {0}'.format(self.clientAddr))
self.factory.numConnections = self.factory.numConnections + 1
logger.debug('There are {0} Client connections'.format(self.factory.numConnections))
def connectionLost(self, reason):
logger.debug('Connection lost with {0} due to {1}'.format(self.clientAddr,reason))
self.factory.numConnections = self.factory.numConnections - 1
logger.debug('There are {0} Client connections'.format(self.factory.numConnections))
def lineLengthExceeded(self, line):
logger.error('Client at {0} exceeded the Line Length'.format(self.clientAddr))
self.transport.abortConnection()
def lineReceived(self, line):
        #We can receive 2 types of lines from a client: pool request (pr), machine request (mr)
request = line.split(':')
if request[0].startswith('pr'):
if request[0].endswith('v') and settings.get('RGS_Ver_Min') != 'False':
#check version
print "###################################" + settings.get('RGS_Ver_Min')
logger.debug('User {0} at {1} is using RGS {2}'.format(request[1], self.clientAddr, request[-1]))
if request[-1] < settings.get('RGS_Ver_Min'):
self.transport.write("Err:Sorry, your RGS reciever is out of date, it should be at least {0}".format(settings.get('RGS_Ver_Min')))
self.transport.loseConnection()
logger.info('User {0} requested pool info from {1}'.format(request[1],self.clientAddr))
#authenticate_user
#get pools for user
try:
self.getAuthLDAP(request[1],request[2]).addCallback(self.writePools)
except:
logger.debug("Could not get Pools")
self.transport.write("Err:Could not authenticate to authentication server")
self.transport.loseConnection()
elif request[0] == 'mr':
logger.info('User {0} requested a machine in pool {1} from {2}'.format(request[1],request[3],self.clientAddr
|
jhavstad/model_runner
|
src/ScrollListViewTest.py
|
Python
|
gpl-2.0
| 469 | 0.006397 |
import PyQtExtras
from PyQt5.QtWidgets import QFrame, QApplication
import sys
def main(args):
app = QApplication([])
main_frame = QFrame()
list_view = PyQtExtras.ListScrollArea(main_frame)
list_view.add_item_by_string('Item 1')
list_view.add_item_by_string('Item 2')
list_view.add_item_by_string('Item 3')
list_view.remove_item_by_string('Item 1')
main_frame.show()
app.exec_()
if __name__ == '__main__':
    main(sys.argv)
|
jigarkb/CTCI
|
LeetCode/015-M-3Sum.py
|
Python
|
mit
| 1,646 | 0.00486 |
# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets
# in the array that give a sum of zero.
#
# Note: The solution set must not contain duplicate triplets.
#
# For example, given array S = [-1, 0, 1, 2, -1, -4],
#
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
nums.sort()
for i in xrange(len(nums) - 2):
if i > 0 and nums[i] == nums[i - 1]:
continue
l, r = i + 1, len(nums) - 1
while l < r:
s = nums[i] + nums[l] + nums[r]
if s < 0:
l += 1
                elif s > 0:
                    r -= 1
else:
res.append((nums[i], nums[l], nums[r]))
while l < r and nums[l] == nums[l + 1]:
l += 1
while l < r and nums[r] == nums[r - 1]:
r -= 1
l += 1
r -= 1
return res
# Note:
# We iterate through the list with pointer i and then try to find two extra numbers that sum to 0 with nums[i].
# Since the list is sorted, the right pointer always points at a value no smaller than the left pointer's.
# So if the sum is too large, move the right pointer back one; if the sum is
# too small (below 0), move the left pointer up one.
#
# To avoid duplicates, we skip further evaluation if pointer i equals pointer i-1
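# Minimal usage sketch (an illustrative addition): run the example from the problem
# statement above; the expected output is [(-1, -1, 2), (-1, 0, 1)].
print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))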
|
Azure/azure-sdk-for-python
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_general_document_async.py
|
Python
|
mit
| 8,184 | 0.004399 |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from devtools_testutils.aio import recorded_by_proxy_async
from azure.ai.formrecognizer._generated.models import AnalyzeResultOperation
from azure.ai.formrecognizer.aio import DocumentAnalysisClient
from azure.ai.formrecognizer import AnalyzeResult
from preparers import FormRecognizerPreparer
from asynctestcase import AsyncFormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
DocumentAnalysisClientPreparer = functools.partial(_GlobalClientPreparer, DocumentAnalysisClient)
class TestDACAnalyzeDocumentAsync(AsyncFormRecognizerTest):
def teardown(self):
self.sleep(4)
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_stream_transform_pdf(self, client):
with open(self.invoice_pdf, "rb") as fd:
document = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(extracted_document)
async with client:
poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback)
result = await poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_stream_transform_jpg(self, client):
with open(self.form_jpg, "rb") as fd:
document = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(extracted_document)
async with client:
poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback)
result = await poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_multipage_transform(self, client):
with open(self.multipage_invoice_pdf, "rb") as fd:
document = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(extracted_document)
async with client:
poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback)
result = await poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@pytest.mark.live_test_only
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_multipage_table_span_pdf(self, client, **kwargs):
with open(self.multipage_table_pdf, "rb") as fd:
my_file = fd.read()
async with client:
poller = await client.begin_analyze_document("prebuilt-document", my_file)
document = await poller.result()
assert len(document.tables) == 3
assert document.tables[0].row_count == 30
assert document.tables[0].column_count == 5
assert document.tables[1].row_count == 6
assert document.tables[1].column_count == 5
assert document.tables[2].row_count == 23
assert document.tables[2].column_count == 5
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
    async def test_document_specify_pages(self, client):
with open(self.multipage_invoice_pdf, "rb") as fd:
document = fd.read()
async with client:
poller = await client.begin_analyze_document("prebuilt-document", document, pages="1")
result = await poller.result()
assert len(result.pages) == 1
poller = await client.begin_analyze_document("prebuilt-document", document, pages="1, 3")
            result = await poller.result()
assert len(result.pages) == 2
poller = await client.begin_analyze_document("prebuilt-document", document, pages="1-2")
result = await poller.result()
assert len(result.pages) == 2
poller = await client.begin_analyze_document("prebuilt-document", document, pages="1-2, 3")
result = await poller.result()
assert len(result.pages) =
|
apahomov/django-sphinx
|
djangosphinx/apis/api281/__init__.py
|
Python
|
bsd-3-clause
| 30,427 | 0.052059 |
#
# $Id: sphinxapi.py 2970 2011-09-23 16:50:22Z klirichek $
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006, Mike Osadnik
# Copyright (c) 2006-2011, Andrew Aksyonoff
# Copyright (c) 2008-2011, Sphinx Technologies Inc
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License. You should have
# received a copy of the GPL license along with this program; if you
# did not, you can find it at http://www.gnu.org/
#
import sys
import select
import socket
import re
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS = 3
SEARCHD_COMMAND_PERSIST = 4
SEARCHD_COMMAND_STATUS = 5
SEARCHD_COMMAND_FLUSHATTRS = 7
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x119
VER_COMMAND_EXCERPT = 0x104
VER_COMMAND_UPDATE = 0x102
VER_COMMAND_KEYWORDS = 0x100
VER_COMMAND_STATUS = 0x100
VER_COMMAND_FLUSHATTRS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurence counts
SPH_RANK_PROXIMITY = 4
SPH_RANK_MATCHANY = 5
SPH_RANK_FIELDMASK = 6
SPH_RANK_SPH04 = 7
SPH_RANK_EXPR = 8
SPH_RANK_TOTAL = 9
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_BIGINT = 6
SPH_ATTR_STRING = 7
SPH_ATTR_MULTI = 0X40000001L
SPH_ATTR_MULTI64 = 0X40000002L
SPH_ATTR_TYPES = (SPH_ATTR_NONE,
SPH_ATTR_INTEGER,
SPH_ATTR_TIMESTAMP,
SPH_ATTR_ORDINAL,
SPH_ATTR_BOOL,
SPH_ATTR_FLOAT,
                  SPH_ATTR_BIGINT,
SPH_ATTR_STRING,
SPH_ATTR_MULTI,
SPH_ATTR_MULTI64)
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
SPH_GROUPBY_ATTRPAIR = 5
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
        self._port = 9312                      # searchd port (default is 9312)
self._path = None # searchd unix-domain socket path
self._socket = None
self._offset = 0 # how much records to seek from result-set start (default is 0)
self._limit = 20 # how much records to return from result-set starting at offset (default is 20)
self._mode = SPH_MATCH_ALL # query matching mode (default is SPH_MATCH_ALL)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
        self._sortby = ''                      # attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0 # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._rankexpr = '' # ranking expression for SPH_RANK_EXPR
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._timeout = 1.0 # connection timeout
self._fieldweights = {} # per-field-name weights
self._overrides = {} # per-query attribute values overrides
self._select = '*' # select-list (attributes or expressions, with optional aliases)
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
def __del__ (self):
if self._socket:
self._socket.close()
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port = None):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
if host.startswith('/'):
self._path = host
return
elif host.startswith('unix://'):
self._path = host[7:]
return
assert(isinstance(port, int))
self._host = host
self._port = port
self._path = None
def SetConnectTimeout ( self, timeout ):
"""
Set connection timeout ( float second )
"""
assert (isinstance(timeout, float))
        # a timeout of 0 would make the connection non-blocking, which is wrong, so the timeout is clipped to a reasonable minimum
self._timeout = max ( 0.001, timeout )
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
if self._socket:
# we have a socket, but is it still alive?
sr, sw, _ = select.select ( [self._socket], [self._socket], [], 0 )
# this is how alive socket should look
if len(sr)==0 and len(sw)==1:
return self._socket
# oops, looks like it was closed, lets reopen
self._socket.close()
self._socket = None
try:
if self._path:
af = socket.AF_UNIX
addr = self._path
desc = self._path
else:
af = socket.AF_INET
addr = ( self._host, self._port )
desc = '%s;%s' % addr
sock = socket.socket ( af, socket.SOCK_STREAM )
sock.settimeout ( self._timeout )
sock.connect ( addr )
except socket.error, msg:
if sock:
sock.close()
self._error = 'connection to %s failed (%s)' % ( desc, msg )
return
v = unpack('>L', sock.recv(4))
if v<1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
if not self._socket:
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if status==SEARCHD_WARNING:
wend = 4 + unpack ( '>L', response[0:4] )[0]
self._warning = response[4:wend]
return response[wend:]
if status==SEARCHD_ERROR:
self._error = 'searchd error: '+response[4:]
return None
if status==SEARCHD_RETRY:
self._error = 'temporary searchd error: '+response[4:]
return None
if status!=SEARCHD_OK:
sel
|
j-windsor/cs3240-f15-team21-v2
|
accounts/urls.py
|
Python
|
mit
| 876 | 0.004566 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
    url(r'^groups/$', views.groups, name='groups'),
url(r'^sitemanager/$', views.sitemanager, name='sitemanager'),
url(r'^(?P<user_id>[0-9]+)/user_view/$', views.user_view, name='user_view'),
url(r'^(?P<user_id>[0-9]+)/deactivate/$', views.deactivate, name='deactivate'),
url(r'^(?P<user_id>[0-9]+)/activate/$', views.activate, name='activate'),
    url(r'^(?P<user_id>[0-9]+)/makeSiteManager/$', views.makeSiteManager, name='makeSiteManager'),
url(r'^(?P<user_id>[0-9]+)/unmakeSiteManager/$', views.unmakeSiteManager, name='unmakeSiteManager'),
url(r'^groups/sitemanager/$', views.groupsSM, name='groupsSM'),
]
|
hephaestus9/Ironworks
|
modules/plugins/nzbget.py
|
Python
|
mit
| 3,148 | 0.002224 |
from flask import render_template, jsonify, request
from jsonrpclib import jsonrpc
import base64
import urllib
from maraschino import app, logger
from maraschino.tools import *
def nzbget_http():
if get_setting_value('nzbget_https') == '1':
return 'https://'
else:
return 'http://'
def nzbget_auth():
return 'nzbget:%s@' % (get_setting_value('nzbget_password'))
def nzbget_url():
return '%s%s%s:%s' % (nzbget_http(), \
nzbget_auth(), \
get_setting_value('nzbget_host'), \
get_setting_value('nzbget_port'))
def nzbget_exception(e):
logger.log('NZBGet :: EXCEPTION -- %s' % e, 'DEBUG')
@app.route('/xhr/nzbget/')
@requires_auth
def xhr_nzbget():
downloads = status = nzbget = None
logger.log('NZBGet :: Getting download list', 'INFO')
try:
nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
status = nzbget.status()
downloads = nzbget.listgroups()
except Exception as e:
nzbget_exception(e)
logger.log('NZBGet :: Getting download list (DONE)', 'INFO')
return render_template('nzbget/queue.html',
nzbget=status,
downloads=downloads,
)
@app.route('/xhr/nzbget/queue/<action>/')
@requires_auth
def queue_action_nzbget(action):
status = False
logger.log('NZBGet :: Queue action: %s' % action, 'INFO')
try:
nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
if 'resume' in action:
status = nzbget.resume()
elif 'pause' in action:
status = nzbget.pause()
except Exception as e:
nzbget_exception(e)
return jsonify({'success': status})
@app.route('/xhr/nzbget/queue/add/', methods=['POST'])
@requires_auth
def queue_add_nzbget():
status = False
    # read the NZB URL before testing it, so the name is defined when checked
    nzb = request.form['url']
    if len(nzb):
        try:
            nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
            nzb = urllib.urlopen(nzb).read()
            status = nzbget.append('test', '', False, base64.standard_b64encode(nzb))
except Exception as e:
nzbget_exception(e)
return jsonify({'success': status})
@app.route('/xhr/nzbget/individual/<int:id>/<action>/')
@requires_auth
def individual_action_nzbget(id, action):
status = False
logger.log('NZBGet :: Item %s action: %s' % (id, action), 'INFO')
if 'resume' in action:
action = 'GroupResume'
elif 'pause' in action:
action = 'GroupPause'
elif 'delete' in action:
action = 'GroupDelete'
try:
nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
status = nzbget.editqueue(action, 0, '', id)
except Exception as e:
nzbget_exception(e)
return jsonify({'success': status, 'id': id, 'action': action})
@app.route('/xhr/nzbget/set_speed/<int:speed>/')
@requires_auth
def set_speed_nzbget(speed):
logger.log('NZBGet :: Setting speed limit: %s' % speed, 'INFO')
try:
nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
status = nzbget.rate(speed)
except Exception as e:
nzbget_exception(e)
return jsonify({'success': status})
|
cmry/ebacs
|
corks.py
|
Python
|
bsd-3-clause
| 2,583 | 0 |
import bottle
from cork import Cork
from utils import skeleton
aaa = Cork('users', email_sender='c.emmery@outlook.com',
smtp_url='smtp://smtp.magnet.ie')
authorize = aaa.make_auth_decorator(fail_redirect='/login', role="user")
def postd():
return bottle.request.forms
def post_get(name, default=''):
return bottle.request.POST.get(name, default).strip()
@bottle.post('/login')
def login():
"""Authenticate users"""
username = post_get('username')
password = post_get('password')
aaa.login(username, password, success_redirect='/', fail_redirect='/login')
@bottle.route('/logout')
def logout():
aaa.logout(success_redirect='/login')
@bottle.post('/register')
def register():
"""Send out registration email"""
aaa.register(post_get('username'), post_get('password'),
post_get('email_address'))
return 'Please check your mailbox.'
@bottle.route('/validate_registration/:registration_code')
def validate_registration(registration_code):
"""Validate registration, create user account"""
aaa.validate_registration(registration_code)
return 'Thanks. <a href="/login">Go to login</a>'
@bottle.post('/change_password')
def change_password():
"""Change password"""
aaa.reset_password(post_get('reset_code'), post_get('password'))
return 'Thanks. <a href="/login">Go to login</a>'
@bottle.post('/create_user')
def create_user():
try:
aaa.create_user(postd().username, postd().role, postd().password)
return dict(ok=True, msg='')
except Exception as e:
return dict(ok=False, msg=e.message)
@bottle.post('/delete_user')
def delete_user():
try:
aaa.delete_user(post_get('username'))
return dict(ok=True, msg='')
except Exception as e:
print(repr(e))
return dict(ok=False, msg=e.message)
@bottle.post('/create_role')
def create_role():
    try:
        aaa.create_role(post_get('role'), post_get('level'))
        return dict(ok=True, msg='')
except Exception as e:
return dict(ok=False, msg=e.message)
@bottle.post('/delete_role')
def delete_role():
try:
aaa.delete_role(post_get('role'))
return dict(ok=True, msg='')
except Exception as e:
return dict(ok=False, msg=e.message)
# Static pages
@bottle.route('/login')
def login_form():
"""Serve login form"""
return skeleton(bottle.template('login_form'))
@bottle.route('/sorry_page')
def sorry_page():
"""Serve sorry page"""
return '<p>Sorry, you are not authorized to perform this action</p>'
|
mrcslws/nupic.research
|
src/nupic/research/frameworks/vernon/mixins/sync_batchnorm.py
|
Python
|
agpl-3.0
| 1,943 | 0.001544 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import torch.nn as nn
class SyncBatchNorm:
"""
This mixin converts the BatchNorm modules to SyncBatchNorm modules when utilizing
distributed training on GPUs.
Example config:
config=dict(
use_sync_batchnorm=True
)
"""
def create_model(self, config, device):
model = super().create_model(config, device)
use_sync_batchnorm = config.get("use_sync_batchnorm", True)
distributed = config.get("distributed", False)
if use_sync_batchnorm and distributed and next(model.parameters()).is_cuda:
# Convert batch norm to sync batch norms
model = nn.modules.SyncBatchNorm.convert_sync_batchnorm(module=model)
return model
@classmethod
def get_execution_order(cls):
eo = super().get_execution_order()
eo["setup_experiment"].insert(0, "Sync Batchnorm begin")
eo["setup_experiment"].append("Sync Batchnorm end")
return eo
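# --- Hedged usage sketch (not part of the original mixin) ---------------------
# The mixin above just defers to torch's own converter, so its effect can be
# reproduced directly. The tiny Sequential model below is made up for
# illustration; nn.SyncBatchNorm.convert_sync_batchnorm is the real API used.
if __name__ == "__main__":
    demo = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    demo = nn.SyncBatchNorm.convert_sync_batchnorm(demo)
    # The BatchNorm2d layer is now a SyncBatchNorm; its statistics are reduced
    # across replicas once the model runs under DistributedDataParallel.
    print(type(demo[1]))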
|
vimalkvn/riboseqr_wrapper
|
riboseqr/ribosome_profile.py
|
Python
|
gpl-2.0
| 4,816 | 0.000208 |
#!/usr/bin/env python
import os
import sys
import glob
import argparse
import logging
import rpy2.robjects as robjects
import utils
rscript = ''
R = robjects.r
def run_rscript(command=None):
"""Run R command, log it, append to rscript"""
global rscript
if not command:
return
logging.debug(command)
rscript += '{}\n'.format(command)
msg = R(command)
def plot_transcript(rdata_load='Metagene.rda', transcript_name='',
transcript_length='27', transcript_cap='',
html_file='Plot-ribosome-profile.html',
output_path=os.getcwd()):
"""Plot ribosome profile for a given transcript. """
options = {}
for key, value, rtype, rmode in (
('transcript_name', transcript_name, 'str', None),
('transcript_length', transcript_length, 'int', 'charvector'),
('transcript_cap', transcript_cap, 'int', None)):
options[key] = utils.process_args(value, ret_type=rtype, ret_mode=rmode)
run_rscript('suppressMessages(library(riboSeqR))')
run_rscript('load("{}")'.format(rdata_load))
html = """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2//EN">
<html>
<head>
<title>Ribosome Profile Plot - Report</title>
</head>
<body>
"""
html += '<h2>Plot ribosome profile - results</h2>\n<hr>\n'
if len(transcript_name):
        cmd_args = (
'"{transcript_name}", main="{transcript_name}",'
'coordinates=ffCs@CDS, riboData=riboDat,'
'length={transcript_length}'.format(**options))
if transcript_cap:
cmd_args += ', cap={transcript_cap}'.format(**options)
plot_file = os.path.join(output_path, 'Ribosome-profile-plot')
for fmat in ('pdf', 'png'):
if fmat == 'png':
cmd = 'png(file="{}_%1d.png", type="cairo")'.format(plot_file)
else:
cmd = 'pdf(file="{}.pdf")'.format(plot_file)
run_rscript(cmd)
cmd = 'plotTranscript({})'.format(cmd_args)
run_rscript(cmd)
run_rscript('dev.off()')
html += ('<p>Selected ribosome footprint length: '
'<strong>{0}</strong>\n'.format(transcript_length))
for image in sorted(glob.glob('{}_*.png'.format(plot_file))):
html += '<p><img border="1" src="{0}" alt="{0}"></p>\n'.format(
os.path.basename(image))
html += '<p><a href="Ribosome-profile-plot.pdf">PDF version</a></p>\n'
else:
msg = 'No transcript name was provided. Did not generate plot.'
html += '<p>{}</p>'.format(msg)
logging.debug(msg)
logging.debug('\n{:#^80}\n{}\n{:#^80}\n'.format(
' R script for this session ', rscript, ' End R script '))
with open(os.path.join(output_path, 'ribosome-profile.R'), 'w') as r:
r.write(rscript)
html += ('<h4>R script for this session</h4>\n'
'<p><a href="ribosome-profile.R">ribosome-profile.R</a></p>\n'
'</body>\n</html>\n')
with open(html_file, 'w') as f:
f.write(html)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot Ribosome profile')
# required arguments
flags = parser.add_argument_group('required arguments')
flags.add_argument('--rdata_load', required=True,
help='Saved riboSeqR data from Step 2')
flags.add_argument('--transcript_name', required=True,
help='Name of the transcript to be plotted')
flags.add_argument(
'--transcript_length', required=True,
help='Size class of ribosome footprint data to be plotted',
default='27')
flags.add_argument(
'--transcript_cap', required=True,
help=('Cap on the largest value that will be plotted as an abundance '
'of the ribosome footprint data'))
parser.add_argument('--html_file', help='HTML file with reports')
parser.add_argument('--output_path', help='Directory to save output files')
parser.add_argument('--debug', help='Produce debug output',
action='store_true')
args = parser.parse_args()
if args.debug:
logging.basicConfig(format='%(levelname)s - %(message)s',
level=logging.DEBUG, stream=sys.stdout)
logging.debug('Supplied Arguments\n{}\n'.format(vars(args)))
if not os.path.exists(args.output_path):
os.mkdir(args.output_path)
plot_transcript(rdata_load=args.rdata_load,
transcript_name=args.transcript_name,
transcript_length=args.transcript_length,
transcript_cap=args.transcript_cap,
html_file=args.html_file, output_path=args.output_path)
logging.debug('Done!')
|
henryroe/xenics_pluto
|
nihts_xcam/__init__.py
|
Python
|
mit
| 77 | 0 |
from __future__ import absolute_import
from .nihts_xcam import XenicsCamera
|
aGHz/structominer
|
tests/test_document.py
|
Python
|
mit
| 1,199 | 0.003336 |
from mock import patch, Mock
from nose.tools import istest
from unittest import TestCase
from structominer import Document, Field
class DocumentTests(TestCase):
@istest
def creating_document_object_with_string_should_automatically_parse(self):
html = '<html></html>'
with patch('structominer.Document.parse') as mocked_parse:
doc = Document(html)
mocked_parse.assert_called_with(html)
@istest
def document_should_store_fields_in_order(self):
class Doc(Document):
three = Mock(Field, _field_counter=3)
two = Mock(Field, _field_counter=2)
one = Mock(Field, _field_counter=1)
doc = Doc()
self.assertEquals([field._field_counter for field in doc._fields.values()], [1, 2, 3])
@istest
    def document_should_only_parse_fields_with_auto_parse_attributes(self):
html = '<html></html>'
class Doc(Document):
one = Mock(Field, _field_counter=1, auto_parse=True)
            two = Mock(Field, _field_counter=2, auto_parse=False)
doc = Doc(html)
self.assertTrue(doc.one.parse.called)
self.assertFalse(doc.two.parse.called)
|
endlessm/chromium-browser
|
third_party/llvm/llvm/bindings/python/llvm/disassembler.py
|
Python
|
bsd-3-clause
| 5,918 | 0.002028 |
#===- disassembler.py - Python LLVM Bindings -----------------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
from ctypes import CFUNCTYPE
from ctypes import POINTER
from ctypes import addressof
from ctypes import c_byte
from ctypes import c_char_p
from ctypes import c_int
from ctypes import c_size_t
from ctypes import c_ubyte
from ctypes import c_uint64
from ctypes import c_void_p
from ctypes import cast
from .common import LLVMObject
from .common import c_object_p
from .common import get_library
__all__ = [
'Disassembler',
]
lib = get_library()
callbacks = {}
# Constants for set_options
Option_UseMarkup = 1
_initialized = False
_targets = ['AArch64', 'ARM', 'Hexagon', 'MSP430', 'Mips', 'NVPTX', 'PowerPC', 'R600', 'Sparc', 'SystemZ', 'X86', 'XCore']
def _ensure_initialized():
global _initialized
if not _initialized:
# Here one would want to call the functions
# LLVMInitializeAll{TargetInfo,TargetMC,Disassembler}s, but
# unfortunately they are only defined as static inline
# functions in the header files of llvm-c, so they don't exist
# as symbols in the shared library.
# So until that is fixed use this hack to initialize them all
for tgt in _targets:
for initializer in ("TargetInfo", "TargetMC", "Disassembler"):
try:
f = getattr(lib, "LLVMInitialize" + tgt + initializer)
except AttributeError:
continue
f()
_initialized = True
class Disassembler(LLVMObject):
"""Represents a disassembler instance.
Disassembler instances are tied to specific "triple," which must be defined
at creation time.
Disassembler instances can disassemble instructions from multiple sources.
"""
    def __init__(self, triple):
        """Create a new disassembler instance.
The triple argument is the triple to create the disassembler for. This
is something like 'i386-apple-darwin9'.
"""
_ensure_initialized()
ptr = lib.LLVMCreateDisasm(c_char_p(triple), c_void_p(None), c_int(0),
callbacks['op_info'](0), callbacks['symbol_lookup'](0))
if not ptr:
raise Exception('Could not obtain disassembler for triple: %s' %
triple)
LLVMObject.__init__(self, ptr, disposer=lib.LLVMDisasmDispose)
def get_instruction(self, source, pc=0):
"""Obtain the next instruction from an input source.
The input source should be a str or bytearray or something that
represents a sequence of bytes.
This function will start reading bytes from the beginning of the
source.
The pc argument specifies the address that the first byte is at.
This returns a 2-tuple of:
long number of bytes read. 0 if no instruction was read.
str representation of instruction. This will be the assembly that
represents the instruction.
"""
buf = cast(c_char_p(source), POINTER(c_ubyte))
out_str = cast((c_byte * 255)(), c_char_p)
result = lib.LLVMDisasmInstruction(self, buf, c_uint64(len(source)),
c_uint64(pc), out_str, 255)
return (result, out_str.value)
def get_instructions(self, source, pc=0):
"""Obtain multiple instructions from an input source.
This is like get_instruction() except it is a generator for all
instructions within the source. It starts at the beginning of the
source and reads instructions until no more can be read.
This generator returns 3-tuple of:
long address of instruction.
long size of instruction, in bytes.
str representation of instruction.
"""
source_bytes = c_char_p(source)
out_str = cast((c_byte * 255)(), c_char_p)
# This could probably be written cleaner. But, it does work.
buf = cast(source_bytes, POINTER(c_ubyte * len(source))).contents
offset = 0
address = pc
end_address = pc + len(source)
while address < end_address:
b = cast(addressof(buf) + offset, POINTER(c_ubyte))
result = lib.LLVMDisasmInstruction(self, b,
c_uint64(len(source) - offset), c_uint64(address),
out_str, 255)
if result == 0:
break
yield (address, result, out_str.value)
address += result
offset += result
def set_options(self, options):
if not lib.LLVMSetDisasmOptions(self, options):
raise Exception('Unable to set all disassembler options in %i' % options)
def register_library(library):
library.LLVMCreateDisasm.argtypes = [c_char_p, c_void_p, c_int,
callbacks['op_info'], callbacks['symbol_lookup']]
library.LLVMCreateDisasm.restype = c_object_p
library.LLVMDisasmDispose.argtypes = [Disassembler]
library.LLVMDisasmInstruction.argtypes = [Disassembler, POINTER(c_ubyte),
c_uint64, c_uint64, c_char_p, c_size_t]
library.LLVMDisasmInstruction.restype = c_size_t
library.LLVMSetDisasmOptions.argtypes = [Disassembler, c_uint64]
library.LLVMSetDisasmOptions.restype = c_int
callbacks['op_info'] = CFUNCTYPE(c_int, c_void_p, c_uint64, c_uint64, c_uint64,
c_int, c_void_p)
callbacks['symbol_lookup'] = CFUNCTYPE(c_char_p, c_void_p, c_uint64,
POINTER(c_uint64), c_uint64,
POINTER(c_char_p))
register_library(lib)
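# --- Hedged usage sketch (not part of the original bindings) ------------------
# Exercises the methods documented above. The target triple and byte strings
# are illustrative only, and this assumes the LLVM shared library found by
# get_library() was built with the x86 target enabled.
if __name__ == '__main__':
    dis = Disassembler(b'i386-apple-darwin9')
    # 0x90 is a one-byte NOP on x86; get_instruction returns (bytes_read, text).
    length, text = dis.get_instruction(b'\x90', pc=0x1000)
    print(length, text)
    # get_instructions yields (address, size, text) until the buffer runs out.
    for address, size, text in dis.get_instructions(b'\x90\x90', pc=0x1000):
        print(hex(address), size, text)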
|
mganeva/mantid
|
scripts/Diffraction/isis_powder/routines/run_details.py
|
Python
|
gpl-3.0
| 5,875 | 0.005447 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from isis_powder.routines import common, yaml_parser
import os
def create_run_details_object(run_number_string, inst_settings, is_vanadium_run, empty_run_number,
grouping_file_name, vanadium_string, splined_name_list=None, van_abs_file_name=None):
"""
Creates and returns a run details object which holds various
properties about the current run.
:param run_number_string: The user string for the current run
:param inst_settings: The current instrument object
:param is_vanadium_run: Boolean of if the current run is a vanadium run
:param empty_run_number: Empty run number(s) from mapping file
:param grouping_file_name: Filename of the grouping file found in the calibration folder
:param vanadium_string: Vanadium run number(s) from mapping file
:param splined_name_list: (Optional) List of unique properties to generate a splined vanadium name from
:param van_abs_file_name: (Optional) The name of the vanadium absorption file
:return: RunDetails object with attributes set to applicable values
"""
cal_map_dict = get_cal_mapping_dict(run_number_string=run_number_string,
cal_mapping_path=inst_settings.cal_mapping_path)
run_number = common.get_first_run_number(run_number_string=run_number_string)
# Get names of files we will be using
calibration_dir = os.path.normpath(os.path.expanduser(inst_settings.calibration_dir))
label = common.cal_map_dictionary_key_helper(dictionary=cal_map_dict, key="label")
offset_file_name = common.cal_map_dictionary_key_helper(dictionary=cal_map_dict, key="offset_file_name")
# Prepend the properties used for creating a van spline so we can fingerprint the file
new_splined_list = splined_name_list if splined_name_list else []
new_splined_list.append(os.path.basename(offset_file_name))
splined_van_name = common.generate_splined_name(vanadium_string, new_splined_list)
unsplined_van_name = common.generate_unsplined_name(vanadium_string, new_splined_list)
if is_vanadium_run:
# The run number should be the vanadium number in this case
run_number = vanadium_string
output_run_string = vanadium_string if is_vanadium_run else run_number_string
# Get the file extension if set
file_extension = getattr(inst_settings, "file_extension")
if file_extension:
# Prefix dot if user has forgotten to
file_extension = file_extension if file_extension.startswith('.') else '.' + file_extension
# Get the output name suffix if set
suffix = getattr(inst_settings, "suffix", None)
# Sample empty if there is one as this is instrument specific
sample_empty = getattr(inst_settings, "sample_empty", None)
# By default, offset file sits in the calibration folder, but it can also be given as an absolute path
if os.path.exists(offset_file_name):
offset_file_path = offset_file_name
else:
offset_file_path = os.path.join(calibration_dir, label, offset_file_name)
# Generate the paths
grouping_file_path = os.path.join(calibration_dir, grouping_file_name)
splined_van_path = os.path.join(calibration_dir, label, splined_van_name)
unsplined_van_path = os.path.join(calibration_dir, label, unsplined_van_name)
van_absorb_path = os.path.join(calibration_dir, van_abs_file_name) if van_abs_file_name else None
return _RunDetails(empty_run_number=empty_run_number, file_extension=file_extension,
run_number=run_number, output_run_string=output_run_string, label=label,
offset_file_path=offset_file_path, grouping_file_path=grouping_file_path,
splined_vanadium_path=splined_van_path, vanadium_run_number=vanadium_string,
sample_empty=sample_empty, vanadium_abs_path=van_absorb_path,
unsplined_vanadium_path=unsplined_van_path, output_suffix=suffix)
def get_cal_mapping_dict(run_number_string, cal_mapping_path):
# Get the python dictionary from the YAML mapping
    run_number = common.get_first_run_number(run_number_string=run_number_string)
cal_mapping_dict = yaml_parser.get_run_dictionary(run_number_string=run_number,
file_path=cal_mapping_path)
return cal_mapping_dict
class _RunDetails(object):
"""
This class holds the full file paths associated with each run and various other useful attributes
"""
def __init__(self, empty_run_number, file_extension, run_number, output_run_string, label,
offset_file_path, grouping_file_path, splined_vanadium_path, vanadium_run_number,
sample_empty, vanadium_abs_path, unsplined_vanadium_path, output_suffix):
# Essential attribute
self.empty_runs = empty_run_number
self.run_number = run_number
self.output_run_string = output_run_string
self.label = label
self.offset_file_path = offset_file_path
self.grouping_file_path = grouping_file_path
self.splined_vanadium_file_path = splined_vanadium_path
self.unsplined_vanadium_file_path = unsplined_vanadium_path
self.vanadium_run_numbers = vanadium_run_number
# Optional
self.file_extension = str(file_extension) if file_extension else None
self.sample_empty = sample_empty
self.vanadium_absorption_path = vanadium_abs_path
self.output_suffix = output_suffix
|
broadinstitute/hellbender
|
src/main/python/org/broadinstitute/hellbender/gcnvkernel/io/io_vcf_parsing.py
|
Python
|
bsd-3-clause
| 4,367 | 0.002977 |
import logging
import vcf
from typing import List, Tuple
_logger = logging.getLogger(__name__)
# TODO: for now I'm going to do the lazy thing and just traverse the VCF each time for each sample
def read_sample_segments_and_calls(intervals_vcf: str,
clustered_vcf: str,
                                   sample_name: str,
                                   contig: str) -> List[Tuple[int, int, int]]:
"""
Get the segmentation "path" to use for calculating qualities based on the VCF with clustered breakpoints
:param intervals_vcf:
:param clustered_vcf:
:param sample_name:
:param contig:
:return: {copy number, start index, stop index (inclusive)}
"""
    intervals = vcf.Reader(filename=intervals_vcf)
intervals2 = vcf.Reader(filename=intervals_vcf)
segments = vcf.Reader(filename=clustered_vcf)
path: List[Tuple[int, int, int]] = []
segment_start_index = 0
segment_end_index = 0
# A record corresponds to [CHROM,POS,REF,ALT]
try:
interval_start_iter = iter(intervals.fetch(contig))
interval_end_iter = iter(intervals2.fetch(contig))
except ValueError:
print('ERROR: could not fetch intervals')
raise
else:
start_interval = next(interval_start_iter)
end_interval = next(interval_end_iter)
intervals_copy_number = try_getting_format_attribute(end_interval, sample_name, 'CN')
try:
segments_iter = iter(segments.fetch(contig))
except ValueError:
return path
else:
segments_rec = next(segments_iter)
segment_copy_number = try_getting_format_attribute(segments_rec, sample_name, 'CN')
# we assume segments are sorted by start, but may be overlapping
while segments_rec is not None and start_interval is not None:
# make sure interval start matches
while start_interval is not None and start_interval.POS < segments_rec.POS:
try:
start_interval = next(interval_start_iter)
segment_start_index += 1
end_interval = next(interval_end_iter)
segment_end_index += 1
except StopIteration:
print('ERROR: ran out of intervals with unmatched segments remaining')
raise
# once start matches, move the interval end
while end_interval is not None and try_getting_info_attribute(segments_rec, 'END') > \
try_getting_info_attribute(end_interval, 'END'):
try:
end_interval = next(interval_end_iter)
segment_end_index += 1
intervals_copy_number = try_getting_format_attribute(end_interval, sample_name, 'CN')
except StopIteration:
print('WARN: ran out of intervals with segment end unmatched')
end_interval = None
# add the segment
if segment_end_index < segment_start_index:
            print('Sample {0} contains segment at {1}:{2} with end index less than start index'.format(sample_name, contig, segments_rec.POS))
path.append((segment_copy_number, segment_start_index, segment_end_index))
# do this the dumb way because each reader gets the same iterator
segment_end_index = 0
interval_end_iter = iter(intervals2.fetch(contig))
end_interval = next(interval_end_iter)
# get the next segment
try:
segments_rec = next(segments_iter)
segment_copy_number = try_getting_format_attribute(segments_rec, sample_name, 'CN')
except StopIteration:
segments_rec = None
segments_iter = None
return path
def try_getting_info_attribute(record,
attribute: str) -> int:
try:
value = record.INFO[attribute]
except AttributeError:
print('No {} field for record at position:{}'.format(attribute, record.POS))
else:
return value
def try_getting_format_attribute(record,
sample_name: str,
attribute: str) -> int:
try:
value = record.genotype(sample_name)[attribute]
except AttributeError:
print('No {} field for {} intervals at position:{}'.format(attribute, sample_name, record.POS))
else:
return value
|
monkeesuit/school
|
Network Security/ARP/arp suite/py/arp.py
|
Python
|
mit
| 7,562 | 0.031605 |
# ARP Suite - Run ARP Commands From Command Line
import sys
import arp_mitm as mitm
import arp_sslstrip as sslstrip
import arp_listen as listen
import arp_request as request
import arp_cache as cache
import arp_reconnaissance as recon
import arp_interactive as interactive
if __name__ == "__main__":
arguments = sys.argv[1:]
if '-h' in arguments or '--help' in arguments:
print '[INFO]\tARP Suite\n'
print '[USAGE] arp.py -c/i/L/r\n'
print '[FUNCTIONS]'
print ' -c --cache = Work with ARP Cache.'
print ' -i --interactive = Runs Interactive ARP Suite.'
print ' -L --listen = Runs an arpclient in listen Mode.'
print ' -r --request = Generate an ARP Request Message.'
print '\n\t* Use --h with any of these functions to learn more about them.'
print '\t\tex. arp.py -c --h'
print ''
sys.exit(1)
if '-i' in arguments or '--interactive' in arguments:
interactive.run()
sys.exit(1)
if '-L' in arguments or'--listen' in arguments:
if '--h' in arguments:
print '[INFO]\tCreates an instance of arpclient in listen mode.'
print '\tHandles ARP Messages and ARP Table.'
print ''
print '[USAGE] arp.py -l\n'
print '[ARGUMENTS]'
print '\tNONE'
sys.exit(1)
listen.listen()
sys.exit(1)
if '-r' in arguments or '--request' in arguments:
if '--h' in arguments:
print '[INFO]\tCreate an ARP Request message to given IP Address.'
            print '\tMake sure there is an instance of arpclient in listen mode'
print '\tto handle ARP messages and manipulate ARP table ("arp.py -l").'
print ''
print '[USAGE] arp.py -r --ip [ip]\n'
print '[ARGUMENTS]'
print '\t"--ip" = IP Address You Wish To Resolve'
print ''
sys.exit(1)
if '--ip' in arguments:
option_index = arguments.index('--ip')
ip = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -r --h"'
sys.exit(0)
        request.send(ip)
sys.exit(1)
if '-c' in arguments or '--cache' in arguments:
if '--h' in arguments:
print '[INFO]\tWork with the ARP Cache\n'
print '[USAGE] arp.py -c --d/l/a/r --i [ip] --m [mac]\n'
print '[ARGUMENTS]'
print '"--d" = Display ARP Cache.'
print '"--l" = Look Up ARP Cache. Must Specify Either Address'
print '"--a" = Add ARP Cache Entry. Must Specify Both Addresses'
print '"--r" = Remove ARP Cache Entry. Must Specify Both Addresses'
print '"--i" = An IP Address'
print '"--m" = A MAC Address'
print ''
# Display
if '--d' in arguments:
cache.cache(1)
# Look Up
if '--l' in arguments:
if '--i' in arguments:
option_index = arguments.index('--i')
ipoption = arguments[option_index+1]
cache.cache(2,ip=ipoption)
sys.exit(1)
elif '--m' in arguments:
option_index = arguments.index('--m')
macoption = arguments[option_index+1]
cache.cache(2,mac=macoption)
sys.exit(1)
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
# ADD an Entry
if '--a' in arguments:
if '--i' in arguments: # use --i to indicate you are giving an ip address
option_index = arguments.index('--i')
ipoption = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
if '--m' in arguments: # use --m to indicate you are giving a mac address
option_index = arguments.index('--m')
macoption = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
cache.cache(3,ip=ipoption,mac=macoption)
sys.exit(1)
# REMOVE an Entry
if '--r' in arguments:
if '--i' in arguments: # use --i to indicate you are giving an ip address
option_index = arguments.index('--i')
ipoption = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
if '--m' in arguments: # use --m to indicate you are giving a mac address
option_index = arguments.index('--m')
macoption = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
cache.cache(4,ip=ipoption,mac=macoption)
sys.exit(1)
if '-m' in arguments or '--mitm' in arguments:
if '--h' in arguments:
print '[Info]\tLaunch an ARP Poisoning Man in the Middle Attack.\n'
print '[Usage] arp.py -m --aI [ip] --aM [mac] --bI [ip] --bM [mac]\n'
print '[Arguments]'
print '\t"--aI" = target A\'s IP Address'
print '\t"--aM" = target A\'s MAC Address'
print '\t"--bI" = target B\'s IP Address'
print '\t"--bM" = target B\'s MAC Address'
print ''
sys.exit(1)
if '--aI' in arguments:
option_index = arguments.index('--aI')
aIP = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--aM' in arguments:
option_index = arguments.index('--aM')
aMAC = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--bI' in arguments:
option_index = arguments.index('--bI')
bIP = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--bM' in arguments:
option_index = arguments.index('--bM')
bMAC = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
mitm.mitm(aIP,aMAC,bIP,bMAC)
sys.exit(1)
if '--sslstrip' in arguments:
if '--h' in arguments:
print '[Info]\tLaunch a SSL Strip Attack.\n'
print '[Usage] arp.py --sslstrip --gI [ip] --gM [mac] --tI [ip] --tM [mac]\n'
print '[Arguments]'
print '\t"--gI" = gateway\'s IP Address'
print '\t"--gM" = gateway\'s MAC Address'
print '\t"--tI" = target\'s IP Address'
print '\t"--tM" = target\'s MAC Address'
print ''
sys.exit(1)
if '--gI' in arguments:
option_index = arguments.index('--gI')
gIP = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--gM' in arguments:
option_index = arguments.index('--gM')
gMAC = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--tI' in arguments:
option_index = arguments.index('--tI')
tIP = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--tM' in arguments:
option_index = arguments.index('--tM')
tMAC = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
sslstrip.sslstrip(gIP,gMAC,tIP,tMAC)
sys.exit(1)
if '--recon' in arguments:
if '--h' in arguments:
print '[Info]\tLearn Address of Those on Network.\n'
print '[Usage] arp.py --recon --ip [iprange], wildcards * allowed\n'
print '[Arguments]'
print '\t"--ip" = A Range of IP Adresses to Scan'
if '--ip' in arguments:
option_index = arguments.index('--ip')
iprange = arguments[option_index+1]
recon.run(str(iprange))
sys.exit(1)
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py --recon --h"'
sys.exit(0)
|
cdcq/jzyzj
|
syzoj/views/problem.py
|
Python
|
mit
| 4,193 | 0.00477 |
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from urllib import urlencode
from flask import jsonify, redirect, url_for, abort, request, render_template
from syzoj import oj, controller
from syzoj.models import User, Problem, File, FileParser
from syzoj.controller import Paginate, Tools
from .common import need_login, not_have_permission, show_error
@oj.route("/problem")
def problem_set():
query = Problem.query
problem_title = request.args.get("problem_title")
if request.args.get("problem_title"):
query = query.filter(Problem.title.like((u"%" + problem_title + u"%")))
else:
problem_title = ''
def make_url(page, other):
other["page"] = page
return url_for("problem_set") + "?" + urlencode(other)
sorter = Paginate(query, make_url=make_url, other={"problem_title": problem_title},
cur_page=request.args.get("page"), edge_display_num=50, per_page=50)
return render_template("problem_set.html", tool=Tools, tab="problem_set", sorter=sorter, problems=sorter.get())
@oj.route("/problem/<int:problem_id>")
def problem(problem_id):
user = User.get_cur_user()
problem = Problem.query.filter_by(id=problem_id).first()
if not problem:
abort(404)
if problem.is_allowed_use(user) == False:
return not_have_permission()
return render_template("problem.html", tool=Tools, tab="problem_set", problem=problem)
@oj.route("/problem/<int:problem_id>/edit", methods=["GET", "POST"])
def edit_problem(problem_id):
user = User.get_cur_user()
if not user:
return need_login()
problem = Problem.query.filter_by(id=problem_id).first()
if problem and problem.is_allowed_edit(user) == False:
return not_have_permission()
if request.method == "POST":
if not problem:
problem_id = controller.create_problem(user=user, title=request.form.get("title"))
problem = Problem.query.filter_by(id=problem_id).first()
problem.update(title=request.form.get("title"),
description=request.form.get("description"),
input_format=request.form.get("input_format"),
output_format=request.form.get("output_format"),
example=request.form.get("example"),
limit_and_hint=request.form.get("limit_and_hint"))
problem.save()
return redirect(url_for("problem", problem_id=problem.id))
else:
return render_template("edit_problem.html", tool=Tools, problem=problem)
@oj.route("/problem/<int:problem_id>/upload", methods=["GET", "POST"])
def upload_testdata(problem_id):
user = User.get_cur_user()
if not user:
return need_login()
problem = Problem.query.filter_by(id=problem_id).first()
if not problem:
abort(404)
if problem.is_allowed_edit(user) == False:
return not_have_permission()
if request.method == "POST":
file = request.files.get("testdata")
if file:
problem.update_testdata(file)
if request.form.get("time_limit"):
problem.time_limit = int(request.form.get("time_limit"))
if request.form.get("memory_limit"):
problem.memory_limit = int(request.form.get("memory_limit"))
problem.save()
return redirect(url_for("upload_testdata", problem_id=problem_id))
else:
return render_template("upload_testdata.html", tool=Tools, problem=problem, parse=FileParser.parse_as_testdata)
# TODO: Maybe add a method to toggle the is_public attr on Problem
@oj.route("/api/problem/<int:problem_id>/public", methods=["POST", "DELETE"])
def change_public_attr(problem_id):
session_id = request.args.get('session_id')
user = User.get_cur_user(session_id=session_id)
problem = Problem.query.filter_by(id=problem_id).first()
if problem and user and user.have_privilege(2):
if request.method == "POST":
problem.is_public = True
elif request.method == "DELETE":
problem.is_public = False
problem.save()
else:
abort(404)
return jsonify({"status": 0})
|
tistaharahap/ds-for-me
|
extractor.py
|
Python
|
mit
| 7,464 | 0.004555 |
# -*- coding: utf-8 -*-
from text.classifiers import NaiveBayesClassifier
from textblob import TextBlob
import feedparser
import time
import redis
import hashlib
import json
TIMEOUT = 60*60
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
def feature_extractor(text):
if not isinstance(text, TextBlob):
text = TextBlob(text.lower())
return {
'has_rumor': 'rumor' in text.words,
'has_gosip': 'gosip' in text.words,
'has_urbanesia': 'urbanesia' in text.words,
'has_batista': 'batista' in text.words,
'has_harahap': 'harahap' in text.words,
'has_pemasaran': 'pemasaran' in text.words,
'has_saham': 'saham' in text.words,
'has_hackathon': 'hackathon' in text.words,
'has_ipo': 'ipo' in text.words,
'has_akuisisi': 'akuisisi' in text.words,
'has_startup': 'startup' in text.words,
'has_android': 'android' in text.words,
'has_aplikasi': 'aplikasi' in text.words,
'has_payment': 'payment' in text.words,
'has_pembayaran': 'pembayaran' in text.words,
'has_api': 'api' in text.words,
'has_kompetisi': 'kompetisi' in text.words,
'has_ide': 'ide' in text.words,
'has_permainan': 'permainan' in text.words,
'has_game': 'game' in text.words,
'has_fundraising': 'fundraising' in text.words,
'has_askds': '[Ask@DailySocial]' in text.words,
'has_investasi': 'investasi' in text.words,
'has_musik': 'musik' in text.words,
'has_lagu': 'lagu' in text.words,
'has_bhinneka': 'bhinneka' in text.words,
'has_marketplace': 'marketplace' in text.words,
'has_mobile': 'mobile' in text.words,
'has_cto': 'cto' in text.words,
'has_traffic': 'traffic' in text.words,
'starts_with_[': text[0] == '['
}
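# Hedged illustration (not part of the original script): feature_extractor turns
# one headline into the boolean dict that the Naive Bayes classifier below is
# trained on; for this title, has_cto/has_urbanesia/has_batista/has_harahap
# come back True and the remaining features False.
_example_features = feature_extractor('CTO Urbanesia Batista Harahap Mengundurkan Diri')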
train_set = [
('Berbarengan dengan Launch Festival, Ice House Buka Kompetisi Wujudkan Ide-Ide Aplikasi Mobile.', 'ok'),
('Ulang Tahun Ke-21, Layanan E-Commerce Bhinneka Segera Perbarui Platform E-Commerce dan Luncurkan Marketplace Terkurasi.', 'ko'),
('Aplikasi Pencatat Blastnote Hadir di Android.', 'ok'),
('Portal Hiburan Digital UZone Kini Hadir Dalam Versi Aplikasi Mobile.', 'ok'),
('CTI IT Infrastructure Summit 2014 Bahas Big Data Sebagai Tren Teknologi', 'ko'),
('Dua Berita Buruk Besar Bagi Blackberry', 'ok'),
('Tanggapan Pelaku Industri Digital di Indonesia tentang Fenomena Permainan Mobile Flappy Bird', 'ok'),
('[Ask@DailySocial] Proses Fundraising Untuk Startup', 'ok'),
('Investasi $1 Miliar, Foxconn Pastikan Bangun Pabriknya di DKI Jakarta', 'ok'),
('Raksasa Digital Cina Tencent Dikabarkan Akuisisi Portal Berita Okezone', 'ko'),
('Wego Tawarkan Akses Reservasi Tiket dan Hotel Lebih Mudah Melalui Aplikasi Mobile', 'ok'),
('Telkom Hadirkan Agen Wisata Online Hi Indonesia', 'ko'),
('Meski Didera Isu Fake Likes, Facebook Tetap Jadi Pilihan Utama Untuk Pemasaran Digital', 'ok'),
('Dave Morin Pastikan Saham Bakrie Global Group di Path Kurang dari 1%', 'ok'),
('Kecil Kemungkinan Pemerintah Tutup Telkomsel dan Indosat Terkait Dugaan Penyadapan oleh Australia', 'ok'),
('Kakao Dikabarkan Gelar Penawaran Saham Perdana Tahun Depan', 'ok'),
('Ericsson Akan Hadirkan Layanan Streaming TV', 'ok'),
('Ryu Kawano: Ingin Startup Anda Go Global? Tunggu Dulu!', 'ok'),
('Kerja Sama dengan GHL Systems Malaysia, Peruri Digital Security Kembangkan Sistem Pembayaran Online', 'ok'),
('Aplikasi Logbook Travel Kini Telah Hadir di Android', 'ok'),
('Musikator Hadirkan Layanan Agregator Lagu Untuk Distribusi Digital', 'ok'),
('[Manic Monday] Strategi Produksi Konten Di Era Multilayar', 'ok'),
('Bakrie Telecom Jajaki Kemungkinan Carrier Billing untuk Path', 'ok'),
('Viber Secara Resmi Telah Diakuisisi Oleh Rakuten Sebesar US$ 900 Juta', 'ok'),
('Situs Panduan Angkutan Umum Kiri.travel Buka API, Tantang Pengembang Buat Aplikasi Windows Phone', 'ok'),
('Wego Luncurkan Jaringan Afiliasi WAN.Travel', 'ko'),
('Business Insider Masuki Pasar Indonesia Bekerja Sama dengan REV Asia', 'ko'),
('Waze Memiliki 750.000 Pengguna di Indonesia', 'ok'),
('Survei Nielsen: Masyarakat Asia Tenggara Lebih Suka Gunakan Uang Tunai untuk Belanja Online', 'ok'),
('CTI IT Infrastructure Summit 2014 Bahas Big Data Sebagai Tren Teknologi', 'ko'),
('Pacu Bisnis di Asia Tenggara, Game Online Asing Kini Lebih Lokal', 'ko'),
('Enam Pilihan Layanan Streaming Musik Yang Dapat Dinikmati di Indonesia', 'ok'),
('Country Manager Yahoo Indonesia Roy Simangunsong Mengundurkan Diri', 'ko'),
('Investasi $1 Miliar, Foxconn Pastikan Bangun Pabriknya di DKI Jakarta', 'ok'),
('Jomblo.com Tawarkan Media Sosial Untuk Mencari Jodoh', 'ko'),
    ('Mitra Adiperkasa dan Groupon Pilih aCommerce Indonesia untuk Pusat Logistik dan Pengiriman Layanan E-Commerce', 'ko'),
('Transformasi Portal Informasi Kecantikan Female Daily Disambut Positif, Beberkan Rencana-Rencana 2014', 'ko'),
('Visa Gelar Promosi Diskon Setiap Jumat Bekerja Sama dengan Enam Layanan E-Commerce Lokal', 'ko'),
('Kerjasama Strategis, Blue Bird Group Benamkan Teknologi Interkoneksi Microsoft Ke Armada Premium Big Bird', 'ko'),
    ('Ramaikan Industri Fashion E-Commerce Indonesia, VIP Plaza Hadir Tawarkan Promo Flash Sale', 'ko'),
('Bidik Citizen Journalism, Detik Hadirkan Media Warga PasangMata', 'ko'),
('Asia Pasifik Jadi Kawasan E-Commerce B2C Terbesar di Dunia Tahun 2014', 'ko'),
('CTO Urbanesia Batista Harahap Mengundurkan Diri', 'ok'),
('Tees Indonesia Alami Peningkatan Traffic Hingga 7x, Namun Tidak Seperti Yang Anda Kira', 'ok')
]
cl = NaiveBayesClassifier(train_set=train_set,
feature_extractor=feature_extractor)
redis_conn = redis.StrictRedis(host=REDIS_HOST,
port=REDIS_PORT)
def get_feed():
feed_url = 'http://feeds.feedburner.com/dsnet?format=xml'
feeds = feedparser.parse(feed_url).get('entries')
if feeds is None:
return
def process_entry(entry):
def process_tags(tags):
return [tag.get('term') for tag in tags]
cls = cl.classify(text=entry.get('title'))
data = {
'author': entry.get('author'),
'title': entry.get('title'),
'link': entry.get('link'),
'published': int(time.mktime(entry.get('published_parsed'))),
'summary': entry.get('summary'),
'tags': process_tags(entry.get('tags')),
'class': cls
}
return data if cls == 'ok' else None
feeds = [process_entry(entry) for entry in feeds]
return [entry for entry in feeds if entry is not None]
def md5(text):
m = hashlib.md5()
m.update(text.encode('utf-8'))
return m.hexdigest()
def cycle():
try:
posts = get_feed()
except KeyError:
print 'Unreadable RSS feed, bailing..'
return
if not posts:
print 'Got nothing, bailing..'
return
def redis_insert(post):
name = 'ds-articles-ok'
redis_conn.zadd(name, post.get('published'), json.dumps(post))
[redis_insert(post=post) for post in posts]
print 'Got %d posts this time.' % len(posts)
if __name__ == '__main__':
print 'Starting up..'
while True:
cycle()
print 'Sleeping for %s seconds.' % TIMEOUT
time.sleep(TIMEOUT)
|
FeodorM/Computer-Graphics
|
util/matrix.py
|
Python
|
mit
| 2,886 | 0.000704 |
from typing import Sequence
from numbers import Number
from tabulate import tabulate
class Matrix(Sequence):
def __init__(self, matrix: Sequence[Sequence[float]]):
        assert (isinstance(matrix, Sequence) and
                isinstance(matrix[0], Sequence)), "Wrong data"
self.__matrix = [[float(x) for x in row] for row in matrix]
@staticmethod
def one(rows: int, columns: int):
return [
[1 if i == j else 0 for j in range(columns)] for i in range(rows)
]
@staticmethod
def zero(rows: int, columns: int):
return [[0] * columns for _ in range(rows)]
def __repr__(self):
return 'Matrix({})'.format(self.__matrix)
def __str__(self):
return tabulate(self.__matrix)
def __len__(self):
return len(self.__matrix)
def __getitem__(self, item):
return self.__matrix.__getitem__(item)
def __iter__(self):
return iter(self.__matrix)
def __mul__(self, other):
assert isinstance(other, Sequence)
# Количество столбцов равно количеству строк / элементов
assert len(self.__matrix[0]) == len(other), "Wrong data"
if isinstance(other[0], Sequence):
return Matrix([
[
sum(self[i][k] * other[k][j] for k in range(len(other))) for j in range(len(other[0]))
] for i in range(len(self))
])
else:
return [
sum(x * y for x, y in zip(row, other)) for row in self
]
def __rmul__(self, other):
assert isinstance(other, Number)
return Matrix([
[other * x for x in row] for row in self.__matrix
])
def __add__(self, other):
# and all(len(other) == len(row) for row in other)), "Wrong data"
assert (isinstance(other, Sequence) and
isinstance(other[0], Sequence) and
len(self) == len(other) and
len(self[0]) == len(other[0])), "Wrong data"
return Matrix([
[x + y for x, y in zip(r1, r2)] for r1, r2 in zip(self.__matrix, other)
])
def __neg__(self):
return Matrix([
[-x for x in row] for row in self.__matrix
])
def __sub__(self, other):
assert (isinstance(other, Sequence) and
isinstance(other[0], Sequence) and
all(len(other) == len(row) for row in other)), "Wrong data"
return Matrix([
            [x - y for x, y in zip(r1, r2)] for r1, r2 in zip(self, other)
])
@property
def shape(self):
        return len(self.__matrix), len(self.__matrix[0])
if __name__ == '__main__':
m = Matrix([[1, 2, 1], [2, 3, 0]])
a = Matrix([[1, 0, 0], [2, 1, 0], [1, 1, 0]])
# print(m, m.shape)
# print(a, a.shape)
print(m * a)
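    # Hedged extension of the demo (not in the original): scalar multiplication
    # via __rmul__ and the shape property defined above.
    print(2 * m)
    print(m.shape)  # (2, 3)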
|
mailund/IMCoalHMM
|
src/IMCoalHMM/CTMC.py
|
Python
|
gpl-2.0
| 2,581 | 0.001937 |
"""Code for constructing CTMCs and computing transition probabilities
in them."""
from numpy import zeros
from scipy import matrix
from scipy.linalg import expm
class CTMC(object):
"""Class representing the CTMC for the back-in-time coalescent."""
def __init__(self, state_space, rates_table):
"""Create the CTMC based on a state space and a mapping
from transition labels to rates.
:param state_space: The state space the CTMC is over.
:type state_space: IMCoalHMM.CoalSystem
:param rates_table: A table where transition rates can
be looked up.
:type rates_table: dict
"""
# Remember this, just to decouple state space from CTMC
# in other parts of the code...
self.state_space = state_space
# noinspection PyCallingNonCallable
self.rate_matrix = matrix(zeros((len(state_space.states),
len(state_space.states))))
for src, trans, dst in state_space.transitions:
self.rate_matrix[src, dst] = rates_table[trans]
for i in xrange(len(state_space.states)):
self.rate_matrix[i, i] = - self.rate_matrix[i, :].sum()
self.prob_matrix_cache = dict()
def probability_matrix(self, delta_t):
"""Computes the transition probability matrix for a
time period of delta_t.
:param delta_t: The time period the CTMC should run for.
:type delta_t: float
:returns: The probability transition matrix
:rtype: matrix
"""
if not delta_t in self.prob_matrix_cache:
self.prob_matrix_cache[delta_t] = expm(self.rate_matrix * delta_t)
return self.prob_matrix_cache[delta_t]
# We cache the CTMCs because in the optimisations, especially the models with a large number
# of parameters, we are creating the same CTMCs again and again and computing the probability
# transition matrices is where we spend most of the time.
from cache import Cache
CTMC_CACHE = Cache()
def make_ctmc(state_space, rates_table):
"""Create the CTMC based on a state space and a mapping
from transition labels to rates.
:param state_space: The state space the CTMC is over.
:type state_space: IMCoalHMM.CoalSystem
    :param rates_table: A table where transition rates can be looked up.
:type rates_table: dict
"""
cache_key = (state_space, tuple(rates_table.items()))
if not cache_key in CTMC_CACHE:
CTMC_CACHE[cache_key] = CTMC(state_space, rates_table)
    return CTMC_CACHE[cache_key]
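# --- Hedged illustration (not part of the module) -----------------------------
# probability_matrix(t) is just expm(rate_matrix * t), cached per t. The 2-state
# rate matrix below is a toy example, not an IMCoalHMM state space; matrix and
# expm are the scipy imports already used at the top of this file.
if __name__ == '__main__':
    toy_rates = matrix([[-0.5, 0.5],
                        [0.1, -0.1]])
    print(expm(toy_rates * 0.3))  # each row of the result still sums to 1.0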
|
rahulunair/nova
|
nova/tests/unit/api/openstack/compute/test_keypairs.py
|
Python
|
apache-2.0
| 27,057 | 0 |
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_policy import policy as oslo_policy
import webob
from nova.api.openstack.compute import keypairs as keypairs_v21
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import policy
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_keypair
QUOTAS = quota.QUOTAS
keypair_data = {
'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
}
FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
name=name, **keypair_data)
def db_key_pair_get_all_by_user(self, user_id, limit, marker):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return fake_keypair(name=keypair['name'])
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context):
raise exception.KeyPairExists(key_name='create_duplicate')
class KeypairsTestV21(test.TestCase):
base_url = '/v2/%s' % fakes.FAKE_PROJECT_ID
validation_error = exception.ValidationError
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def _setup_app_and_controller(self):
self.app_server = fakes.wsgi_app_v21()
self.controller = keypairs_v21.KeypairController()
def setUp(self):
super(KeypairsTestV21, self).setUp()
fakes.stub_out_networking(self)
fakes.stub_out_secgroup_api(self)
self.stub_out("nova.db.api.key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stub_out("nova.db.api.key_pair_create",
db_key_pair_create)
self.stub_out("nova.db.api.key_pair_destroy",
db_key_pair_destroy)
self._setup_app_and_controller()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
def test_keypair_list(self):
res_dict = self.controller.index(self.req)
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
res_dict = self.controller.create(self.req, body=body)
self.assertGreater(len(res_dict['keypair']['fingerprint']), 0)
self.assertGreater(len(res_dict['keypair']['private_key']), 0)
self._assert_keypair_type(res_dict)
def _test_keypair_create_bad_request_case(self,
body,
exception):
self.assertRaises(exception,
self.controller.create, self.req, body=body)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_too_long(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_leading_trailing_spaces(self):
body = {
'keypair': {
'name': ' test '
}
}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
        body = {'keypair': {'name': ' test '}}
self.req.set_legacy_v2()
res_dict = self.controller.create(self.req, body=body)
self.assertEqual('test', res_dict['keypair']['name'])
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
                'name': 'test/keypair'
}
}
self._test_keypair_create_bad_request_case(body,
webob.exc.HTTPBadRequest)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
self._test_keypair_create_bad_request_case(body,
webob.exc.HTTPBadRequest)
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
res_dict = self.controller.create(self.req, body=body)
# FIXME(ja): Should we check that public_key was sent to create?
self.assertGreater(len(res_dict['keypair']['fingerprint']), 0)
self.assertNotIn('private_key', res_dict['keypair'])
self._assert_keypair_type(res_dict)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_import_quota_limit(self, mock_check):
mock_check.side_effect = exception.OverQuota(overs='key_pairs',
usages={'key_pairs': 100})
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_quota_limit(self, mock_check):
mock_check.side_effect = exception.OverQuota(overs='key_pairs',
usages={'key_pairs': 100})
b
|
teracyhq/flask-boilerplate
|
app/api_1_0/args.py
|
Python
|
bsd-3-clause
| 308 | 0 |
from webargs import fields
from ..api.validators import Email, password
user_args = {
    'email': fields.Str(validate=Email, required=True),
'password': fields.Str(validate=password, required=True)
}
role_args = {
'name': fields.Str(required=True),
'description': fields.Str(required=True)
}
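# --- Hedged usage sketch (not part of the original module) --------------------
# Arg dicts like these are normally consumed with webargs' Flask parser. The
# /users view below is made up for illustration; use_args from
# webargs.flaskparser is the real API being shown.
if __name__ == '__main__':
    from flask import Flask, jsonify
    from webargs.flaskparser import use_args
    app = Flask(__name__)
    @app.route('/users', methods=['POST'])
    @use_args(user_args)
    def create_user(args):
        # args arrives already validated by the Email and password validators
        return jsonify(email=args['email'])
    app.run(debug=True)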
|
alon/polinax
|
libs/external_libs/gdata.py-1.0.13/src/gdata/contacts/service.py
|
Python
|
gpl-2.0
| 5,972 | 0.009377 |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ContactsService extends the GDataService to streamline Google Contacts operations.
ContactsService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'dbrattli (Dag Brattli)'
import gdata
import atom.service
import gdata.service
import gdata.calendar
import atom
class Error(Exception):
pass
class RequestError(Error):
pass
class ContactsService(gdata.service.GDataService):
"""Client for the Google Contats service."""
def __init__(self, email=None, password=None, source=None,
server='www.google.com',
additional_headers=None):
gdata.service.GDataService.__init__(self, email=email, password=password,
service='cp', source=source,
server=server,
additional_headers=additional_headers)
def GetContactsFeed(self,
uri='http://www.google.com/m8/feeds/contacts/default/base'):
return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString)
def CreateContact(self, new_contact,
      insert_uri='/m8/feeds/contacts/default/base', url_params=None,
escape_params=True):
"""Adds an event to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new event which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Post(new_contact, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def UpdateContact(self, edit_uri, updated_contact, url_params=None,
escape_params=True):
"""Updates an existing contact.
Args:
edit_uri: string The edit link URI for the element being updated
updated_contact: string, atom.Entry or subclass containing
the Atom Entry which will replace the event which is
stored at the edit_url
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
url_prefix = 'http://%s/' % self.server
if edit_uri.startswith(url_prefix):
edit_uri = edit_uri[len(url_prefix):]
response = self.Put(updated_contact, '/%s' % edit_uri,
url_params=url_params,
escape_params=escape_params)
if isinstance(response, atom.Entry):
return gdata.contacts.ContactEntryFromString(response.ToString())
else:
return response
def DeleteContact(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
"""Removes an event with the specified ID from Google Contacts.
Args:
edit_uri: string The edit URL of the entry to be deleted. Example:
'http://www.google.com/m8/feeds/contacts/default/base/xxx/yyy'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful delete, a httplib.HTTPResponse containing the server's
response to the DELETE request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
url_prefix = 'http://%s/' % self.server
if edit_uri.startswith(url_prefix):
edit_uri = edit_uri[len(url_prefix):]
return self.Delete('/%s' % edit_uri,
url_params=url_params, escape_params=escape_params)
class ContactsQuery(gdata.service.Query):
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
self.feed = feed or '/m8/feeds/contacts/default/base'
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
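A brief, hedged sketch of how this client might be exercised follows. The gdata 1.x API shown here is Python 2-era and long deprecated, the credentials are placeholders, and the ClientLogin flow behind ProgrammaticLogin no longer works against live Google endpoints, so treat this purely as an illustration of the call shapes.
# Illustrative sketch only -- not part of the original service.py.
import gdata.contacts
import gdata.contacts.service

client = gdata.contacts.service.ContactsService(
    email='user@example.com', password='app-password', source='example-app')
client.ProgrammaticLogin()  # ClientLogin flow used by gdata 1.x clients

# Fetch the default contacts feed and print each entry's title.
feed = client.GetContactsFeed()
for entry in feed.entry:
    print(entry.title.text)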
|
goyal-sidd/BLT
|
website/migrations/0038_issue_upvotes.py
|
Python
|
agpl-3.0
| 447 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-17 01:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0037_auto_20170813_0319'),
]
operations = [
migrations.AddField(
model_name='issue',
name='upvotes',
            field=models.IntegerField(default=0),
),
]
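For context, this migration corresponds to a model-level declaration roughly like the sketch below; the Issue model lives elsewhere in the website app, so the snippet is an assumption about its shape, not a copy of it. Applying the migration is the usual "python manage.py migrate website".
# Hypothetical excerpt of website/models.py once this migration exists.
from django.db import models

class Issue(models.Model):
    # ... the model's other fields are defined in the real file ...
    upvotes = models.IntegerField(default=0)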
|
GoogleCloudPlatform/covid-19-open-data
|
src/england_data/standardize_data.py
|
Python
|
apache-2.0
| 21,282 | 0.010384 |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for pre-processing the data into individual, standardized formats."""
import collections
import datetime
import itertools
import os
import pathlib
import re
from typing import Callable, Dict, Set, Tuple
from absl import logging
from dm_c19_modelling.england_data import constants
import pandas as pd
import yaml
_PATH_FILENAME_REGEXES = "filename_regexes.yaml"
_COLUMNS = constants.Columns
_DATE_FORMAT = "%Y-%m-%d"
def _order_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Orders the columns of the dataframe as: date, region, observations."""
df.insert(0, _COLUMNS.DATE.value, df.pop(_COLUMNS.DATE.value))
reg_columns = []
obs_columns = []
for col in df.columns[1:]:
if col.startswith(constants.REGION_PREFIX):
reg_columns.append(col)
elif col.startswith(constants.OBSERVATION_PREFIX):
obs_columns.append(col)
else:
raise ValueError(f"Unknown column: '{col}'")
columns = [_COLUMNS.DATE.value] + reg_columns + obs_columns
return df[columns]
def _raw_data_formatter_daily_deaths(filepath: str) -> pd.DataFrame:
"""Loads and formats daily deaths data."""
sheet_name = "Tab4 Deaths by trust"
header = 15
df = pd.read_excel(filepath, sheet_name=sheet_name, header=header)
# Drop rows and columns which are all nans.
df.dropna(axis=0, how="all", inplace=True)
df.dropna(axis=1, how="all", inplace=True)
# Drop unneeded columns and rows.
drop_columns = ["Total", "Awaiting verification"]
  up_to_mar_1_index = "Up to 01-Mar-20"
if sum(i for i in df[up_to_mar_1_index] if isinstance(i, int)) == 0.0:
drop_columns.append(up_to_mar_1_index)
df.drop(columns=drop_columns, inplace=True)
df = df[df["Code"] != "-"]
# Melt the death counts by date into "Date" and "Death Count" columns.
df = df.melt(
id_vars=["NHS England Region", "Code", "Name"],
var_name="Date",
value_name="Death Count")
# Rename the columns to their standard names.
df.rename(
columns={
"Date": _COLUMNS.DATE.value,
"Death Count": _COLUMNS.OBS_DEATHS.value,
"Code": _COLUMNS.REG_TRUST_CODE.value,
"Name": _COLUMNS.REG_TRUST_NAME.value,
"NHS England Region": _COLUMNS.REG_NHSER_NAME.value,
},
inplace=True)
_order_columns(df)
df[_COLUMNS.DATE.value] = df[_COLUMNS.DATE.value].map(
lambda x: x.strftime(_DATE_FORMAT))
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_TRUST_NAME.value,
_COLUMNS.REG_TRUST_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'daily_deaths' contains nans")
return df
def _raw_data_formatter_daily_cases(filepath: str) -> pd.DataFrame:
"""Loads and formats daily cases data."""
df = pd.read_csv(filepath)
df.rename(columns={"Area type": "Area_type"}, inplace=True)
df.query("Area_type == 'ltla'", inplace=True)
# Drop unneeded columns and rows.
drop_columns = [
"Area_type", "Cumulative lab-confirmed cases",
"Cumulative lab-confirmed cases rate"
]
df.drop(columns=drop_columns, inplace=True)
# Rename the columns to their standard names.
df.rename(
columns={
"Area name": _COLUMNS.REG_LTLA_NAME.value,
"Area code": _COLUMNS.REG_LTLA_CODE.value,
"Specimen date": _COLUMNS.DATE.value,
"Daily lab-confirmed cases": _COLUMNS.OBS_CASES.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_LTLA_NAME.value,
_COLUMNS.REG_LTLA_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'daily_cases' contains nans")
return df
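Each _raw_data_formatter_* function takes the path of one raw source file and returns a frame in the standardized column order. A hedged sketch of a direct call is below; the filename is a placeholder for a locally downloaded PHE cases export, not a file shipped with the repository.
# Illustrative sketch only -- assumes a local copy of the PHE daily-cases CSV.
cases_df = _raw_data_formatter_daily_cases("coronavirus-cases_latest.csv")
print(cases_df.columns.tolist())  # date first, then region_* columns, then observation columns
print(cases_df.head())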
def _raw_data_formatter_google_mobility(filepath: str) -> pd.DataFrame:
"""Loads and formats Google mobility data."""
df = pd.read_csv(filepath)
# Filter to UK.
df.query("country_region_code == 'GB'", inplace=True)
# Drop unneeded columns and rows.
drop_columns = [
"country_region_code", "country_region", "metro_area", "census_fips_code"
]
df.drop(columns=drop_columns, inplace=True)
# Fill missing region info with "na".
df[["sub_region_1", "sub_region_2", "iso_3166_2_code"]].fillna(
"na", inplace=True)
# Rename the columns to their standard names.
df.rename(
columns={
"sub_region_1":
_COLUMNS.REG_SUB_REGION_1.value,
"sub_region_2":
_COLUMNS.REG_SUB_REGION_2.value,
"iso_3166_2_code":
_COLUMNS.REG_ISO_3166_2_CODE.value,
"date":
_COLUMNS.DATE.value,
"retail_and_recreation_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_RETAIL_AND_RECREATION.value,
"grocery_and_pharmacy_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_GROCERY_AND_PHARMACY.value,
"parks_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_PARKS.value,
"transit_stations_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_TRANSIT_STATIONS.value,
"workplaces_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_WORKPLACES.value,
"residential_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_RESIDENTIAL.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_SUB_REGION_1.value,
_COLUMNS.REG_SUB_REGION_2.value,
_COLUMNS.REG_ISO_3166_2_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
return df
def _raw_data_formatter_online_111(filepath: str) -> pd.DataFrame:
"""Loads and formats online 111 data."""
df = pd.read_csv(filepath)
# Drop nans.
df.dropna(subset=["ccgcode"], inplace=True)
# Reformat dates.
remap_dict = {
"journeydate":
lambda x: datetime.datetime.strptime(x, "%d/%m/%Y").strftime( # pylint: disable=g-long-lambda
_DATE_FORMAT),
"ccgname":
lambda x: x.replace("&", "and"),
"sex": {
"Female": "f",
"Male": "m",
"Indeterminate": "u",
},
"ageband": {
"0-18 years": "0",
"19-69 years": "19",
"70+ years": "70"
}
}
for col, remap in remap_dict.items():
df[col] = df[col].map(remap)
journeydate_values = pd.date_range(
df.journeydate.min(), df.journeydate.max()).strftime(_DATE_FORMAT)
ccgcode_values = df.ccgcode.unique()
df.sex.fillna("u", inplace=True)
sex_values = ["f", "m", "u"]
assert set(sex_values) >= set(df.sex.unique()), "unsupported sex value"
df.ageband.fillna("u", inplace=True)
ageband_values = ["0", "19", "70", "u"]
assert set(ageband_values) >= set(
df.ageband.unique()), "unsupported ageband value"
ccg_code_name_map = df[["ccgcode", "ccgname"
]].set_index("ccgcode")["ccgname"].drop_duplicates()
# Some CCG codes have duplicate names, which differ by their commas. Keep the
# longer ones.
fn = lambda x: sorted(x["ccgname"].map(lambda y: (len(y), y)))[-1][1]
ccg_code_name_map = ccg_code_name_map.reset_i
|
stephen144/odoo
|
addons/website_portal_sale/controllers/main.py
|
Python
|
agpl-3.0
| 1,940 | 0.001546 |
# -*- coding: utf-8 -*-
import datetime
from openerp import http
from openerp.http import request
from openerp.addons.website_portal.controllers.main import website_account
class website_account(website_account):
@http.route(['/my/home'], type='http', auth="user", website=True)
def account(self, **kw):
""" Add sales documents to main account page """
response = super(website_account, self).account()
partner = request.env.user.partner_id
        res_sale_order = request.env['sale.order']
res_invoices = request.env['account.invoice']
quotations = res_sale_order.search([
('state', 'in', ['sent', 'cancel'])
])
orders = res_sale_order.search([
('state', 'in', ['sale', 'done'])
])
invoices = res_invoices.search([
('state', 'in', ['open', 'paid', 'cancelled'])
])
response.qcontext.update({
'date': datetime.date.today().strftime('%Y-%m-%d'),
'quotations': quotations,
'orders': orders,
'invoices': invoices,
})
return response
@http.route(['/my/orders/<int:order>'], type='http', auth="user", website=True)
def orders_followup(self, order=None):
partner = request.env['res.users'].browse(request.uid).partner_id
domain = [
('partner_id.id', '=', partner.id),
('state', 'not in', ['draft', 'cancel']),
('id', '=', order)
]
order = request.env['sale.order'].search(domain)
invoiced_lines = request.env['account.invoice.line'].search([('invoice_id', 'in', order.invoice_ids.ids)])
order_invoice_lines = {il.product_id.id: il.invoice_id for il in invoiced_lines}
return request.website.render("website_portal_sale.orders_followup", {
'order': order.sudo(),
'order_invoice_lines': order_invoice_lines,
})
|
js0701/chromium-crosswalk
|
tools/perf/page_sets/key_search_mobile.py
|
Python
|
bsd-3-clause
| 2,523 | 0.003567 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class KeySearchMobilePage(page_module.Page):
def __init__(self, url, page_set):
super(KeySearchMobilePage, self).__init__(
url=url, page_set=page_set, credentials_path = 'data/credentials.json',
shared_page_state_class=shared_page_state.SharedMobilePageState)
self.archive_data_file = 'data/key_search_mobile.json'
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class KeySearchMobilePageSet(story.StorySet):
""" Key mobile search queries on google """
def __init__(self):
super(KeySearchMobilePageSet, self).__init__(
archive_data_file='data/key_search_mobile.json',
cloud_storage_bucket=story.PUBLIC_BUCKET)
urls_list = [
# Why: An empty page should be as snappy as possible
'http://www.google.com/',
# Why: A reasonable search term with no images or ads usually
'https://www.google.com/search?q=science',
# Why: A reasonable search term with images but no ads usually
'http://www.google.com/search?q=orange',
# Why: An address search
# pylint: disable=line-too-long
'https://www.google.com/search?q=1600+Amphitheatre+Pkwy%2C+Mountain+View%2C+CA',
# Why: A search for a known actor
'http://www.google.com/search?q=tom+hanks',
# Why: A search for weather
'https://www.google.com/search?q=weather+94110',
# Why: A search for a stock
'http://www.google.com/search?q=goog',
# Why: Charts
'https://www.google.com/search?q=population+of+california',
# Why: Flights
      'http://www.google.com/search?q=sfo+jfk+flights',
# Why: Movie showtimes
'https://www.google.com/search?q=movies+94110',
# Why: A tip calculator
'http://www.google.com/search?q=tip+on+100+bill',
# Why: Time
'https://www.google.com/search?q=time+in+san+francisco',
# Why: Definitions
'http://www.google.com/search?q=define+define',
# Why: Local results
'https://www.google.com/search?q=burritos+94110',
# Why: Graph
'http://www.google.com/search?q=x^3'
]
for url in urls_list:
self.AddStory(KeySearchMobilePage(url, self))
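As a quick, hedged sanity check outside telemetry's benchmark runner (which is how this page set is normally consumed), the set can be instantiated directly; this only assumes StorySet exposes its stories sequence, and it is not part of the original module.
# Illustrative sketch only.
if __name__ == '__main__':
  page_set = KeySearchMobilePageSet()
  for story in page_set.stories:
    print(story.url)  # one line per search-query page defined above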
|