index | package | name | docstring | code | signature |
---|---|---|---|---|---|
5,833 |
boto.storage_uri
|
exists
|
Returns True if the object exists or False if it doesn't
|
def exists(self, headers=None):
"""Returns True if the object exists or False if it doesn't"""
if not self.object_name:
raise InvalidUriError('exists on object-less URI (%s)' % self.uri)
bucket = self.get_bucket(headers)
key = bucket.get_key(self.object_name, headers=headers)
return bool(key)
|
(self, headers=None)
|
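A minimal usage sketch for `exists` as defined above, assuming boto credentials are already configured; the `gs://example-bucket/object.txt` URI is hypothetical:

```python
import boto

# Hypothetical object URI; exists() fetches the key and returns a bool.
uri = boto.storage_uri('gs://example-bucket/object.txt')
if uri.exists():
    print('object is present')
else:
    print('object is missing')
```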
5,834 |
boto.storage_uri
|
get_acl
|
returns a bucket's acl
|
def get_acl(self, validate=False, headers=None, version_id=None):
"""returns a bucket's acl"""
self._check_bucket_uri('get_acl')
bucket = self.get_bucket(validate, headers)
# This works for both bucket- and object- level ACLs (former passes
# key_name=None):
key_name = self.object_name or ''
if self.get_provider().name == 'aws':
version_id = version_id or self.version_id
acl = bucket.get_acl(key_name, headers, version_id)
else:
acl = bucket.get_acl(key_name, headers, generation=self.generation)
self.check_response(acl, 'acl', self.uri)
return acl
|
(self, validate=False, headers=None, version_id=None)
|
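A hedged example of reading an ACL through the method above; the bucket name is made up and credentials are assumed to be configured:

```python
import boto

# Works for bucket URIs and, per the comment above, object URIs as well.
uri = boto.storage_uri('gs://example-bucket')
acl = uri.get_acl()
print(acl.to_xml())  # boto ACL objects can be serialized back to XML
```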
5,835 |
boto.storage_uri
|
get_all_buckets
| null |
def get_all_buckets(self, headers=None):
conn = self.connect()
return conn.get_all_buckets(headers)
|
(self, headers=None)
|
5,836 |
boto.storage_uri
|
get_all_keys
| null |
def get_all_keys(self, validate=False, headers=None, prefix=None):
bucket = self.get_bucket(validate, headers)
return bucket.get_all_keys(headers)
|
(self, validate=False, headers=None, prefix=None)
|
5,837 |
boto.storage_uri
|
get_billing_config
| null |
def get_billing_config(self, headers=None):
self._check_bucket_uri('get_billing_config')
# billing is defined as a bucket param for GCS, but not for S3.
if self.scheme != 'gs':
raise ValueError('get_billing_config() not supported for %s '
'URIs.' % self.scheme)
bucket = self.get_bucket(False, headers)
return bucket.get_billing_config(headers)
|
(self, headers=None)
|
5,838 |
boto.storage_uri
|
get_bucket
| null |
def get_bucket(self, validate=False, headers=None):
self._check_bucket_uri('get_bucket')
conn = self.connect()
bucket = conn.get_bucket(self.bucket_name, validate, headers)
self.check_response(bucket, 'bucket', self.uri)
return bucket
|
(self, validate=False, headers=None)
|
5,839 |
boto.storage_uri
|
get_contents_as_string
| null |
def get_contents_as_string(self, validate=False, headers=None, cb=None,
num_cb=10, torrent=False, version_id=None):
self._check_object_uri('get_contents_as_string')
key = self.get_key(validate, headers)
self.check_response(key, 'key', self.uri)
return key.get_contents_as_string(headers, cb, num_cb, torrent,
version_id)
|
(self, validate=False, headers=None, cb=None, num_cb=10, torrent=False, version_id=None)
|
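A small download sketch built on the method above (object name hypothetical, credentials assumed):

```python
import boto

uri = boto.storage_uri('gs://example-bucket/notes.txt')
data = uri.get_contents_as_string()  # whole object payload in memory
print('%d bytes downloaded' % len(data))
```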
5,840 |
boto.storage_uri
|
get_contents_to_file
| null |
def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None,
res_download_handler=None, response_headers=None,
hash_algs=None):
self._check_object_uri('get_contents_to_file')
key = self.get_key(None, headers)
self.check_response(key, 'key', self.uri)
if hash_algs:
key.get_contents_to_file(fp, headers, cb, num_cb, torrent,
version_id, res_download_handler,
response_headers,
hash_algs=hash_algs)
else:
key.get_contents_to_file(fp, headers, cb, num_cb, torrent,
version_id, res_download_handler,
response_headers)
|
(self, fp, headers=None, cb=None, num_cb=10, torrent=False, version_id=None, res_download_handler=None, response_headers=None, hash_algs=None)
|
5,841 |
boto.storage_uri
|
get_contents_to_stream
| null |
def get_contents_to_stream(self, fp, headers=None, version_id=None):
self._check_object_uri('get_key')
self._warn_about_args('get_key', validate=False)
key = self.get_key(None, headers)
self.check_response(key, 'key', self.uri)
return key.get_contents_to_file(fp, headers, version_id=version_id)
|
(self, fp, headers=None, version_id=None)
|
5,842 |
boto.storage_uri
|
get_cors
|
returns a bucket's CORS XML
|
def get_cors(self, validate=False, headers=None):
"""returns a bucket's CORS XML"""
self._check_bucket_uri('get_cors')
bucket = self.get_bucket(validate, headers)
cors = bucket.get_cors(headers)
self.check_response(cors, 'cors', self.uri)
return cors
|
(self, validate=False, headers=None)
|
5,843 |
boto.storage_uri
|
get_def_acl
|
returns a bucket's default object acl
|
def get_def_acl(self, validate=False, headers=None):
"""returns a bucket's default object acl"""
self._check_bucket_uri('get_def_acl')
bucket = self.get_bucket(validate, headers)
acl = bucket.get_def_acl(headers)
self.check_response(acl, 'acl', self.uri)
return acl
|
(self, validate=False, headers=None)
|
5,844 |
boto.storage_uri
|
get_encryption_config
|
Returns a GCS bucket's encryption configuration.
|
def get_encryption_config(self, validate=False, headers=None):
"""Returns a GCS bucket's encryption configuration."""
self._check_bucket_uri('get_encryption_config')
# EncryptionConfiguration is defined as a bucket param for GCS, but not
# for S3.
if self.scheme != 'gs':
raise ValueError('get_encryption_config() not supported for %s '
'URIs.' % self.scheme)
bucket = self.get_bucket(validate, headers)
return bucket.get_encryption_config(headers=headers)
|
(self, validate=False, headers=None)
|
5,845 |
boto.storage_uri
|
get_key
| null |
def get_key(self, validate=False, headers=None, version_id=None):
self._check_object_uri('get_key')
bucket = self.get_bucket(validate, headers)
if self.get_provider().name == 'aws':
key = bucket.get_key(self.object_name, headers,
version_id=(version_id or self.version_id))
elif self.get_provider().name == 'google':
key = bucket.get_key(self.object_name, headers,
generation=self.generation)
self.check_response(key, 'key', self.uri)
return key
|
(self, validate=False, headers=None, version_id=None)
|
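One illustrative way to use get_key: resolve the URI to a provider Key object and read its metadata. The names below are placeholders:

```python
import boto

uri = boto.storage_uri('gs://example-bucket/report.csv')
key = uri.get_key()
print(key.size, key.etag)  # standard attributes on boto Key objects
```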
5,846 |
boto.storage_uri
|
get_lifecycle_config
|
Returns a bucket's lifecycle configuration.
|
def get_lifecycle_config(self, validate=False, headers=None):
"""Returns a bucket's lifecycle configuration."""
self._check_bucket_uri('get_lifecycle_config')
bucket = self.get_bucket(validate, headers)
lifecycle_config = bucket.get_lifecycle_config(headers)
self.check_response(lifecycle_config, 'lifecycle', self.uri)
return lifecycle_config
|
(self, validate=False, headers=None)
|
5,847 |
boto.storage_uri
|
get_location
| null |
def get_location(self, validate=False, headers=None):
self._check_bucket_uri('get_location')
bucket = self.get_bucket(validate, headers)
return bucket.get_location(headers)
|
(self, validate=False, headers=None)
|
5,848 |
boto.storage_uri
|
get_logging_config
| null |
def get_logging_config(self, validate=False, headers=None, version_id=None):
self._check_bucket_uri('get_logging_config')
bucket = self.get_bucket(validate, headers)
return bucket.get_logging_config(headers=headers)
|
(self, validate=False, headers=None, version_id=None)
|
5,849 |
boto.storage_uri
|
get_provider
| null |
def get_provider(self):
conn = self.connect()
provider = conn.provider
self.check_response(provider, 'provider', self.uri)
return provider
|
(self)
|
5,850 |
boto.storage_uri
|
get_storage_class
| null |
def get_storage_class(self, validate=False, headers=None):
self._check_bucket_uri('get_storage_class')
# StorageClass is defined as a bucket and object param for GCS, but
# only as a key param for S3.
if self.scheme != 'gs':
raise ValueError('get_storage_class() not supported for %s '
'URIs.' % self.scheme)
bucket = self.get_bucket(validate, headers)
return bucket.get_storage_class(headers)
|
(self, validate=False, headers=None)
|
5,851 |
boto.storage_uri
|
get_subresource
| null |
def get_subresource(self, subresource, validate=False, headers=None,
version_id=None):
self._check_bucket_uri('get_subresource')
bucket = self.get_bucket(validate, headers)
return bucket.get_subresource(subresource, self.object_name, headers,
version_id)
|
(self, subresource, validate=False, headers=None, version_id=None)
|
5,852 |
boto.storage_uri
|
get_versioning_config
| null |
def get_versioning_config(self, headers=None):
self._check_bucket_uri('get_versioning_config')
bucket = self.get_bucket(False, headers)
return bucket.get_versioning_status(headers)
|
(self, headers=None)
|
5,853 |
boto.storage_uri
|
get_website_config
| null |
def get_website_config(self, validate=False, headers=None):
self._check_bucket_uri('get_website_config')
bucket = self.get_bucket(validate, headers)
return bucket.get_website_configuration(headers)
|
(self, validate=False, headers=None)
|
5,854 |
boto.storage_uri
|
has_version
| null |
def has_version(self):
return (issubclass(type(self), BucketStorageUri)
and ((self.version_id is not None)
or (self.generation is not None)))
|
(self)
|
5,855 |
boto.storage_uri
|
is_cloud_uri
|
Returns True if this URI names a bucket or object.
|
def is_cloud_uri(self):
"""Returns True if this URI names a bucket or object."""
return True
|
(self)
|
5,856 |
boto.storage_uri
|
is_file_uri
|
Returns True if this URI names a file or directory.
|
def is_file_uri(self):
"""Returns True if this URI names a file or directory."""
return False
|
(self)
|
5,857 |
boto.storage_uri
|
is_stream
|
Returns True if this URI represents input/output stream.
|
def is_stream(self):
"""Returns True if this URI represents input/output stream."""
return False
|
(self)
|
5,858 |
boto.storage_uri
|
list_bucket
| null |
def list_bucket(self, prefix='', delimiter='', headers=None,
all_versions=False):
self._check_bucket_uri('list_bucket')
bucket = self.get_bucket(headers=headers)
if all_versions:
return (v for v in bucket.list_versions(
prefix=prefix, delimiter=delimiter, headers=headers)
if not isinstance(v, DeleteMarker))
else:
return bucket.list(prefix=prefix, delimiter=delimiter,
headers=headers)
|
(self, prefix='', delimiter='', headers=None, all_versions=False)
|
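An illustrative loop over the listing returned above; the prefix and delimiter are hypothetical values:

```python
import boto

uri = boto.storage_uri('gs://example-bucket')
# Iterates live objects; pass all_versions=True to walk versions instead.
for entry in uri.list_bucket(prefix='logs/', delimiter='/'):
    print(entry.name)
```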
5,859 |
boto.storage_uri
|
list_grants
| null |
def list_grants(self, headers=None):
self._check_bucket_uri('list_grants ')
bucket = self.get_bucket(headers)
return bucket.list_grants(headers)
|
(self, headers=None)
|
5,860 |
boto.storage_uri
|
names_bucket
|
Returns True if this URI names a bucket.
|
def names_bucket(self):
"""Returns True if this URI names a bucket."""
return bool(self.bucket_name) and bool(not self.object_name)
|
(self)
|
5,861 |
boto.storage_uri
|
names_container
|
Returns True if this URI names a directory or bucket. Will return
False for bucket subdirs; providing bucket subdir semantics needs to
be done by the caller (like gsutil does).
|
def names_container(self):
"""
Returns True if this URI names a directory or bucket. Will return
False for bucket subdirs; providing bucket subdir semantics needs to
be done by the caller (like gsutil does).
"""
return bool(not self.object_name)
|
(self)
|
5,862 |
boto.storage_uri
|
names_directory
|
Returns True if this URI names a directory.
|
def names_directory(self):
"""Returns True if this URI names a directory."""
return False
|
(self)
|
5,863 |
boto.storage_uri
|
names_file
|
Returns True if this URI names a file.
|
def names_file(self):
"""Returns True if this URI names a file."""
return False
|
(self)
|
5,864 |
boto.storage_uri
|
names_object
|
Returns True if this URI names an object.
|
def names_object(self):
"""Returns True if this URI names an object."""
return self.names_singleton()
|
(self)
|
5,865 |
boto.storage_uri
|
names_provider
|
Returns True if this URI names a provider.
|
def names_provider(self):
"""Returns True if this URI names a provider."""
return bool(not self.bucket_name)
|
(self)
|
5,866 |
boto.storage_uri
|
names_singleton
|
Returns True if this URI names a file or object.
|
def names_singleton(self):
"""Returns True if this URI names a file or object."""
return bool(self.object_name)
|
(self)
|
5,867 |
boto.storage_uri
|
new_key
| null |
def new_key(self, validate=False, headers=None):
self._check_object_uri('new_key')
bucket = self.get_bucket(validate, headers)
return bucket.new_key(self.object_name)
|
(self, validate=False, headers=None)
|
5,868 |
boto.storage_uri
|
set_acl
|
Sets or updates a bucket's ACL.
|
def set_acl(self, acl_or_str, key_name='', validate=False, headers=None,
version_id=None, if_generation=None, if_metageneration=None):
"""Sets or updates a bucket's ACL."""
self._check_bucket_uri('set_acl')
key_name = key_name or self.object_name or ''
bucket = self.get_bucket(validate, headers)
if self.generation:
bucket.set_acl(
acl_or_str, key_name, headers, generation=self.generation,
if_generation=if_generation, if_metageneration=if_metageneration)
else:
version_id = version_id or self.version_id
bucket.set_acl(acl_or_str, key_name, headers, version_id)
|
(self, acl_or_str, key_name='', validate=False, headers=None, version_id=None, if_generation=None, if_metageneration=None)
|
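A sketch of applying a canned ACL via the method above; 'public-read' is a standard canned ACL string, and the URI is hypothetical:

```python
import boto

uri = boto.storage_uri('gs://example-bucket/public/index.html')
uri.set_acl('public-read')  # acl_or_str accepts canned ACL names or ACL objects
```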
5,869 |
boto.storage_uri
|
set_canned_acl
|
Sets or updates a bucket's acl to a predefined (canned) value.
|
def set_canned_acl(self, acl_str, validate=False, headers=None,
version_id=None):
"""Sets or updates a bucket's acl to a predefined (canned) value."""
self._check_object_uri('set_canned_acl')
self._warn_about_args('set_canned_acl', version_id=version_id)
key = self.get_key(validate, headers)
self.check_response(key, 'key', self.uri)
key.set_canned_acl(acl_str, headers)
|
(self, acl_str, validate=False, headers=None, version_id=None)
|
5,870 |
boto.storage_uri
|
set_contents_from_file
| null |
def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
num_cb=10, policy=None, md5=None, size=None,
rewind=False, res_upload_handler=None):
self._check_object_uri('set_contents_from_file')
key = self.new_key(headers=headers)
if self.scheme == 'gs':
result = key.set_contents_from_file(
fp, headers, replace, cb, num_cb, policy, md5, size=size,
rewind=rewind, res_upload_handler=res_upload_handler)
if res_upload_handler:
self._update_from_values(None, res_upload_handler.generation,
None, md5)
else:
self._warn_about_args('set_contents_from_file',
res_upload_handler=res_upload_handler)
result = key.set_contents_from_file(
fp, headers, replace, cb, num_cb, policy, md5, size=size,
rewind=rewind)
self._update_from_key(key)
return result
|
(self, fp, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, size=None, rewind=False, res_upload_handler=None)
|
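A minimal upload sketch using the method above; the local and remote paths are placeholders:

```python
import boto

uri = boto.storage_uri('gs://example-bucket/uploads/photo.jpg')
with open('photo.jpg', 'rb') as fp:
    uri.set_contents_from_file(fp)  # streams the open file to the new key
```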
5,871 |
boto.storage_uri
|
set_contents_from_stream
| null |
def set_contents_from_stream(self, fp, headers=None, replace=True, cb=None,
policy=None, reduced_redundancy=False):
self._check_object_uri('set_contents_from_stream')
dst_key = self.new_key(False, headers)
result = dst_key.set_contents_from_stream(
fp, headers, replace, cb, policy=policy,
reduced_redundancy=reduced_redundancy)
self._update_from_key(dst_key)
return result
|
(self, fp, headers=None, replace=True, cb=None, policy=None, reduced_redundancy=False)
|
5,872 |
boto.storage_uri
|
set_contents_from_string
| null |
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False):
self._check_object_uri('set_contents_from_string')
key = self.new_key(headers=headers)
if self.scheme == 'gs':
if reduced_redundancy:
sys.stderr.write('Warning: GCS does not support '
'reduced_redundancy; argument ignored by '
'set_contents_from_string')
result = key.set_contents_from_string(
s, headers, replace, cb, num_cb, policy, md5)
else:
result = key.set_contents_from_string(
s, headers, replace, cb, num_cb, policy, md5,
reduced_redundancy)
self._update_from_key(key)
return result
|
(self, s, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, reduced_redundancy=False)
|
5,873 |
boto.storage_uri
|
set_cors
|
sets or updates a bucket's CORS XML
|
def set_cors(self, cors, validate=False, headers=None):
"""sets or updates a bucket's CORS XML"""
self._check_bucket_uri('set_cors ')
bucket = self.get_bucket(validate, headers)
if self.scheme == 's3':
bucket.set_cors(cors, headers)
else:
bucket.set_cors(cors.to_xml(), headers)
|
(self, cors, validate=False, headers=None)
|
5,874 |
boto.storage_uri
|
set_def_acl
|
Sets or updates a bucket's default object ACL.
|
def set_def_acl(self, acl_or_str, validate=False, headers=None,
version_id=None):
"""Sets or updates a bucket's default object ACL."""
self._check_bucket_uri('set_def_acl')
self.get_bucket(validate, headers).set_def_acl(acl_or_str, headers)
|
(self, acl_or_str, validate=False, headers=None, version_id=None)
|
5,875 |
boto.storage_uri
|
set_def_canned_acl
|
Sets or updates a bucket's default object acl to a predefined
(canned) value.
|
def set_def_canned_acl(self, acl_str, validate=False, headers=None,
version_id=None):
"""Sets or updates a bucket's default object acl to a predefined
(canned) value."""
self._check_bucket_uri('set_def_canned_acl ')
key = self.get_key(validate, headers)
self.check_response(key, 'key', self.uri)
key.set_def_canned_acl(acl_str, headers, version_id)
|
(self, acl_str, validate=False, headers=None, version_id=None)
|
5,876 |
boto.storage_uri
|
set_def_xml_acl
|
Sets or updates a bucket's default object ACL with an XML string.
|
def set_def_xml_acl(self, xmlstring, validate=False, headers=None):
"""Sets or updates a bucket's default object ACL with an XML string."""
self._check_bucket_uri('set_def_xml_acl')
self.get_bucket(validate, headers).set_def_xml_acl(xmlstring, headers)
|
(self, xmlstring, validate=False, headers=None)
|
5,877 |
boto.storage_uri
|
set_encryption_config
|
Sets a GCS bucket's encryption configuration.
|
def set_encryption_config(self, default_kms_key_name=None, validate=False,
headers=None):
"""Sets a GCS bucket's encryption configuration."""
self._check_bucket_uri('set_encryption_config')
bucket = self.get_bucket(validate, headers)
bucket.set_encryption_config(default_kms_key_name=default_kms_key_name,
headers=headers)
|
(self, default_kms_key_name=None, validate=False, headers=None)
|
5,878 |
boto.storage_uri
|
set_metadata
| null |
def set_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
return self.get_key(False).set_remote_metadata(metadata_plus,
metadata_minus,
preserve_acl,
headers=headers)
|
(self, metadata_plus, metadata_minus, preserve_acl, headers=None)
|
5,879 |
boto.storage_uri
|
set_storage_class
|
Updates a bucket's storage class.
|
def set_storage_class(self, storage_class, validate=False, headers=None):
"""Updates a bucket's storage class."""
self._check_bucket_uri('set_storage_class')
# StorageClass is defined as a bucket and object param for GCS, but
# only as a key param for S3.
if self.scheme != 'gs':
raise ValueError('set_storage_class() not supported for %s '
'URIs.' % self.scheme)
bucket = self.get_bucket(validate, headers)
bucket.set_storage_class(storage_class, headers)
|
(self, storage_class, validate=False, headers=None)
|
5,880 |
boto.storage_uri
|
set_subresource
| null |
def set_subresource(self, subresource, value, validate=False, headers=None,
version_id=None):
self._check_bucket_uri('set_subresource')
bucket = self.get_bucket(validate, headers)
bucket.set_subresource(subresource, value, self.object_name, headers,
version_id)
|
(self, subresource, value, validate=False, headers=None, version_id=None)
|
5,881 |
boto.storage_uri
|
set_website_config
| null |
def set_website_config(self, main_page_suffix=None, error_key=None,
validate=False, headers=None):
self._check_bucket_uri('set_website_config')
bucket = self.get_bucket(validate, headers)
if not (main_page_suffix or error_key):
bucket.delete_website_configuration(headers)
else:
bucket.configure_website(main_page_suffix, error_key, headers)
|
(self, main_page_suffix=None, error_key=None, validate=False, headers=None)
|
5,882 |
boto.storage_uri
|
set_xml_acl
|
Sets or updates a bucket's ACL with an XML string.
|
def set_xml_acl(self, xmlstring, key_name='', validate=False, headers=None,
version_id=None, if_generation=None, if_metageneration=None):
"""Sets or updates a bucket's ACL with an XML string."""
self._check_bucket_uri('set_xml_acl')
key_name = key_name or self.object_name or ''
bucket = self.get_bucket(validate, headers)
if self.generation:
bucket.set_xml_acl(
xmlstring, key_name, headers, generation=self.generation,
if_generation=if_generation, if_metageneration=if_metageneration)
else:
version_id = version_id or self.version_id
bucket.set_xml_acl(xmlstring, key_name, headers,
version_id=version_id)
|
(self, xmlstring, key_name='', validate=False, headers=None, version_id=None, if_generation=None, if_metageneration=None)
|
5,883 |
boto.pyami.config
|
Config
| null |
class Config(object):
def __init__(self, path=None, fp=None, do_load=True):
self._parser = ConfigParser({'working_dir': '/mnt/pyami',
'debug': '0'})
if do_load:
if path:
self.load_from_path(path)
elif fp:
self.readfp(fp)
else:
self.read(BotoConfigLocations)
if "AWS_CREDENTIAL_FILE" in os.environ:
full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE'])
try:
self.load_credential_file(full_path)
except IOError:
warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)
def __setstate__(self, state):
# There are tests that verify that (transitively) a Config
# object can be pickled. Now that we're storing a _parser
# attribute and relying on __getattr__ to proxy requests,
# we need to implement setstate to ensure we don't get
# into recursive loops when looking up _parser when
# this object is unpickled.
self._parser = state['_parser']
def __getattr__(self, name):
return getattr(self._parser, name)
def has_option(self, *args, **kwargs):
return self._parser.has_option(*args, **kwargs)
def load_credential_file(self, path):
"""Load a credential file as is setup like the Java utilities"""
c_data = StringIO()
c_data.write("[Credentials]\n")
for line in open(path, "r").readlines():
c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
c_data.seek(0)
self.readfp(c_data)
def load_from_path(self, path):
file = open(path)
for line in file.readlines():
match = re.match(r"^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
if match:
extended_file = match.group(1)
(dir, file) = os.path.split(path)
self.load_from_path(os.path.join(dir, extended_file))
self.read(path)
def save_option(self, path, section, option, value):
"""
Write the specified Section.Option to the config file specified by path.
Replace any previous value. If the path doesn't exist, create it.
Also add the option to the in-memory config.
"""
config = ConfigParser()
config.read(path)
if not config.has_section(section):
config.add_section(section)
config.set(section, option, value)
fp = open(path, 'w')
config.write(fp)
fp.close()
if not self.has_section(section):
self.add_section(section)
self.set(section, option, value)
def save_user_option(self, section, option, value):
self.save_option(UserConfigPath, section, option, value)
def save_system_option(self, section, option, value):
self.save_option(BotoConfigPath, section, option, value)
def get_instance(self, name, default=None):
try:
val = self.get('Instance', name)
except (NoOptionError, NoSectionError):
val = default
return val
def get_user(self, name, default=None):
try:
val = self.get('User', name)
except (NoOptionError, NoSectionError):
val = default
return val
def getint_user(self, name, default=0):
try:
val = self.getint('User', name)
except (NoOptionError, NoSectionError):
val = default
return val
def get_value(self, section, name, default=None):
return self.get(section, name, default)
def get(self, section, name, default=None):
try:
return self._parser.get(section, name)
except (NoOptionError, NoSectionError):
return default
def getint(self, section, name, default=0):
try:
return self._parser.getint(section, name)
except (NoOptionError, NoSectionError):
return int(default)
def getfloat(self, section, name, default=0.0):
try:
return self._parser.getfloat(section, name)
except (NoOptionError, NoSectionError):
return float(default)
def getbool(self, section, name, default=False):
if self.has_option(section, name):
val = self.get(section, name)
if val.lower() == 'true':
val = True
else:
val = False
else:
val = default
return val
def setbool(self, section, name, value):
if value:
self.set(section, name, 'true')
else:
self.set(section, name, 'false')
def dump(self):
s = StringIO()
self.write(s)
print(s.getvalue())
def dump_safe(self, fp=None):
if not fp:
fp = StringIO()
for section in self.sections():
fp.write('[%s]\n' % section)
for option in self.options(section):
if option == 'aws_secret_access_key':
fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
else:
fp.write('%s = %s\n' % (option, self.get(section, option)))
def dump_to_sdb(self, domain_name, item_name):
from boto.compat import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
if not domain:
domain = sdb.create_domain(domain_name)
item = domain.new_item(item_name)
item.active = False
for section in self.sections():
d = {}
for option in self.options(section):
d[option] = self.get(section, option)
item[section] = json.dumps(d)
item.save()
def load_from_sdb(self, domain_name, item_name):
from boto.compat import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
item = domain.get_item(item_name)
for section in item.keys():
if not self.has_section(section):
self.add_section(section)
d = json.loads(item[section])
for attr_name in d.keys():
attr_value = d[attr_name]
if attr_value is None:
attr_value = 'None'
if isinstance(attr_value, bool):
self.setbool(section, attr_name, attr_value)
else:
self.set(section, attr_name, attr_value)
|
(path=None, fp=None, do_load=True)
|
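A short sketch of reading values through Config; the section and option names are only illustrative, and missing entries fall back to the supplied defaults:

```python
from boto.pyami.config import Config

# Loads the standard boto config locations (/etc/boto.cfg, ~/.boto, ...).
cfg = Config()
print(cfg.get('Boto', 'num_retries', default='5'))   # string value or default
print(cfg.getint('Boto', 'debug', default=0))        # coerced to int
```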
5,884 |
boto.pyami.config
|
__getattr__
| null |
def __getattr__(self, name):
return getattr(self._parser, name)
|
(self, name)
|
5,885 |
boto.pyami.config
|
__init__
| null |
def __init__(self, path=None, fp=None, do_load=True):
self._parser = ConfigParser({'working_dir': '/mnt/pyami',
'debug': '0'})
if do_load:
if path:
self.load_from_path(path)
elif fp:
self.readfp(fp)
else:
self.read(BotoConfigLocations)
if "AWS_CREDENTIAL_FILE" in os.environ:
full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE'])
try:
self.load_credential_file(full_path)
except IOError:
warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)
|
(self, path=None, fp=None, do_load=True)
|
5,886 |
boto.pyami.config
|
__setstate__
| null |
def __setstate__(self, state):
# There are tests that verify that (transitively) a Config
# object can be pickled. Now that we're storing a _parser
# attribute and relying on __getattr__ to proxy requests,
# we need to implement setstate to ensure we don't get
# into recursive loops when looking up _parser when
# this object is unpickled.
self._parser = state['_parser']
|
(self, state)
|
5,887 |
boto.pyami.config
|
dump
| null |
def dump(self):
s = StringIO()
self.write(s)
print(s.getvalue())
|
(self)
|
5,888 |
boto.pyami.config
|
dump_safe
| null |
def dump_safe(self, fp=None):
if not fp:
fp = StringIO()
for section in self.sections():
fp.write('[%s]\n' % section)
for option in self.options(section):
if option == 'aws_secret_access_key':
fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
else:
fp.write('%s = %s\n' % (option, self.get(section, option)))
|
(self, fp=None)
|
5,889 |
boto.pyami.config
|
dump_to_sdb
| null |
def dump_to_sdb(self, domain_name, item_name):
from boto.compat import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
if not domain:
domain = sdb.create_domain(domain_name)
item = domain.new_item(item_name)
item.active = False
for section in self.sections():
d = {}
for option in self.options(section):
d[option] = self.get(section, option)
item[section] = json.dumps(d)
item.save()
|
(self, domain_name, item_name)
|
5,890 |
boto.pyami.config
|
get
| null |
def get(self, section, name, default=None):
try:
return self._parser.get(section, name)
except (NoOptionError, NoSectionError):
return default
|
(self, section, name, default=None)
|
5,891 |
boto.pyami.config
|
get_instance
| null |
def get_instance(self, name, default=None):
try:
val = self.get('Instance', name)
except (NoOptionError, NoSectionError):
val = default
return val
|
(self, name, default=None)
|
5,892 |
boto.pyami.config
|
get_user
| null |
def get_user(self, name, default=None):
try:
val = self.get('User', name)
except (NoOptionError, NoSectionError):
val = default
return val
|
(self, name, default=None)
|
5,893 |
boto.pyami.config
|
get_value
| null |
def get_value(self, section, name, default=None):
return self.get(section, name, default)
|
(self, section, name, default=None)
|
5,894 |
boto.pyami.config
|
getbool
| null |
def getbool(self, section, name, default=False):
if self.has_option(section, name):
val = self.get(section, name)
if val.lower() == 'true':
val = True
else:
val = False
else:
val = default
return val
|
(self, section, name, default=False)
|
5,895 |
boto.pyami.config
|
getfloat
| null |
def getfloat(self, section, name, default=0.0):
try:
return self._parser.getfloat(section, name)
except (NoOptionError, NoSectionError):
return float(default)
|
(self, section, name, default=0.0)
|
5,896 |
boto.pyami.config
|
getint
| null |
def getint(self, section, name, default=0):
try:
return self._parser.getint(section, name)
except (NoOptionError, NoSectionError):
return int(default)
|
(self, section, name, default=0)
|
5,897 |
boto.pyami.config
|
getint_user
| null |
def getint_user(self, name, default=0):
try:
val = self.getint('User', name)
except (NoOptionError, NoSectionError):
val = default
return val
|
(self, name, default=0)
|
5,898 |
boto.pyami.config
|
has_option
| null |
def has_option(self, *args, **kwargs):
return self._parser.has_option(*args, **kwargs)
|
(self, *args, **kwargs)
|
5,899 |
boto.pyami.config
|
load_credential_file
|
Load a credential file in the format used by the Java utilities
|
def load_credential_file(self, path):
"""Load a credential file as is setup like the Java utilities"""
c_data = StringIO()
c_data.write("[Credentials]\n")
for line in open(path, "r").readlines():
c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
c_data.seek(0)
self.readfp(c_data)
|
(self, path)
|
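A self-contained sketch of the credential-file format this method expects (AWSAccessKeyId / AWSSecretKey lines); the values are obviously fake:

```python
import tempfile
from boto.pyami.config import Config

# Write a throwaway credential file in the legacy Java-tools format.
with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as tmp:
    tmp.write('AWSAccessKeyId=AKIAEXAMPLE\n')
    tmp.write('AWSSecretKey=not-a-real-secret\n')

cfg = Config(do_load=False)
cfg.load_credential_file(tmp.name)
print(cfg.get('Credentials', 'aws_access_key_id'))  # -> AKIAEXAMPLE
```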
5,900 |
boto.pyami.config
|
load_from_path
| null |
def load_from_path(self, path):
file = open(path)
for line in file.readlines():
match = re.match(r"^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
if match:
extended_file = match.group(1)
(dir, file) = os.path.split(path)
self.load_from_path(os.path.join(dir, extended_file))
self.read(path)
|
(self, path)
|
5,901 |
boto.pyami.config
|
load_from_sdb
| null |
def load_from_sdb(self, domain_name, item_name):
from boto.compat import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
item = domain.get_item(item_name)
for section in item.keys():
if not self.has_section(section):
self.add_section(section)
d = json.loads(item[section])
for attr_name in d.keys():
attr_value = d[attr_name]
if attr_value is None:
attr_value = 'None'
if isinstance(attr_value, bool):
self.setbool(section, attr_name, attr_value)
else:
self.set(section, attr_name, attr_value)
|
(self, domain_name, item_name)
|
5,902 |
boto.pyami.config
|
save_option
|
Write the specified Section.Option to the config file specified by path.
Replace any previous value. If the path doesn't exist, create it.
Also add the option to the in-memory config.
|
def save_option(self, path, section, option, value):
"""
Write the specified Section.Option to the config file specified by path.
Replace any previous value. If the path doesn't exist, create it.
Also add the option to the in-memory config.
"""
config = ConfigParser()
config.read(path)
if not config.has_section(section):
config.add_section(section)
config.set(section, option, value)
fp = open(path, 'w')
config.write(fp)
fp.close()
if not self.has_section(section):
self.add_section(section)
self.set(section, option, value)
|
(self, path, section, option, value)
|
5,903 |
boto.pyami.config
|
save_system_option
| null |
def save_system_option(self, section, option, value):
self.save_option(BotoConfigPath, section, option, value)
|
(self, section, option, value)
|
5,904 |
boto.pyami.config
|
save_user_option
| null |
def save_user_option(self, section, option, value):
self.save_option(UserConfigPath, section, option, value)
|
(self, section, option, value)
|
5,905 |
boto.pyami.config
|
setbool
| null |
def setbool(self, section, name, value):
if value:
self.set(section, name, 'true')
else:
self.set(section, name, 'false')
|
(self, section, name, value)
|
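A round-trip sketch showing how setbool and getbool cooperate (booleans are stored as the strings 'true'/'false'); the section and option names are made up:

```python
from boto.pyami.config import Config

cfg = Config(do_load=False)   # start from an empty, in-memory config
cfg.add_section('Test')       # proxied to the underlying ConfigParser
cfg.setbool('Test', 'verbose', True)
assert cfg.getbool('Test', 'verbose') is True
```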
5,906 |
boto.storage_uri
|
FileStorageUri
|
StorageUri subclass that handles files in the local file system.
Callers should instantiate this class by calling boto.storage_uri().
See file/README about how we map StorageUri operations onto a file system.
|
class FileStorageUri(StorageUri):
"""
StorageUri subclass that handles files in the local file system.
Callers should instantiate this class by calling boto.storage_uri().
See file/README about how we map StorageUri operations onto a file system.
"""
delim = os.sep
def __init__(self, object_name, debug, is_stream=False):
"""Instantiate a FileStorageUri from a path name.
@type object_name: string
@param object_name: object name
@type debug: boolean
@param debug: whether to enable debugging on this StorageUri
After instantiation the components are available in the following
fields: uri, scheme, bucket_name (always blank for this "anonymous"
bucket), object_name.
"""
self.scheme = 'file'
self.bucket_name = ''
self.object_name = object_name
self.uri = 'file://' + object_name
self.debug = debug
self.stream = is_stream
def clone_replace_name(self, new_name):
"""Instantiate a FileStorageUri from the current FileStorageUri,
but replacing the object_name.
@type new_name: string
@param new_name: new object name
"""
return FileStorageUri(new_name, self.debug, self.stream)
def is_file_uri(self):
"""Returns True if this URI names a file or directory."""
return True
def is_cloud_uri(self):
"""Returns True if this URI names a bucket or object."""
return False
def names_container(self):
"""Returns True if this URI names a directory or bucket."""
return self.names_directory()
def names_singleton(self):
"""Returns True if this URI names a file (or stream) or object."""
return not self.names_container()
def names_directory(self):
"""Returns True if this URI names a directory."""
if self.stream:
return False
return os.path.isdir(self.object_name)
def names_provider(self):
"""Returns True if this URI names a provider."""
return False
def names_bucket(self):
"""Returns True if this URI names a bucket."""
return False
def names_file(self):
"""Returns True if this URI names a file."""
return self.names_singleton()
def names_object(self):
"""Returns True if this URI names an object."""
return False
def is_stream(self):
"""Returns True if this URI represents input/output stream.
"""
return bool(self.stream)
def close(self):
"""Closes the underlying file.
"""
self.get_key().close()
def exists(self, _headers_not_used=None):
"""Returns True if the file exists or False if it doesn't"""
# The _headers_not_used parameter is ignored. It is only there to ensure
# that this method's signature is identical to the exists method on the
# BucketStorageUri class.
return os.path.exists(self.object_name)
|
(object_name, debug, is_stream=False)
|
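A quick sketch of how a plain path becomes a FileStorageUri through boto.storage_uri; the path is a placeholder:

```python
import boto

# No scheme in the string, so the default 'file' scheme applies.
uri = boto.storage_uri('/tmp/example.txt')
print(uri.uri)                # file:///tmp/example.txt
print(uri.is_file_uri())      # True
print(uri.names_directory())  # True only if the path is a directory
```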
5,907 |
boto.storage_uri
|
__init__
|
Instantiate a FileStorageUri from a path name.
@type object_name: string
@param object_name: object name
@type debug: boolean
@param debug: whether to enable debugging on this StorageUri
After instantiation the components are available in the following
fields: uri, scheme, bucket_name (always blank for this "anonymous"
bucket), object_name.
|
def __init__(self, object_name, debug, is_stream=False):
"""Instantiate a FileStorageUri from a path name.
@type object_name: string
@param object_name: object name
@type debug: boolean
@param debug: whether to enable debugging on this StorageUri
After instantiation the components are available in the following
fields: uri, scheme, bucket_name (always blank for this "anonymous"
bucket), object_name.
"""
self.scheme = 'file'
self.bucket_name = ''
self.object_name = object_name
self.uri = 'file://' + object_name
self.debug = debug
self.stream = is_stream
|
(self, object_name, debug, is_stream=False)
|
5,915 |
boto.storage_uri
|
clone_replace_name
|
Instantiate a FileStorageUri from the current FileStorageUri,
but replacing the object_name.
@type new_name: string
@param new_name: new object name
|
def clone_replace_name(self, new_name):
"""Instantiate a FileStorageUri from the current FileStorageUri,
but replacing the object_name.
@type new_name: string
@param new_name: new object name
"""
return FileStorageUri(new_name, self.debug, self.stream)
|
(self, new_name)
|
5,916 |
boto.storage_uri
|
close
|
Closes the underlying file.
|
def close(self):
"""Closes the underlying file.
"""
self.get_key().close()
|
(self)
|
5,918 |
boto.storage_uri
|
delete_key
| null |
def delete_key(self, validate=False, headers=None, version_id=None,
mfa_token=None):
self._check_object_uri('delete_key')
bucket = self.get_bucket(validate, headers)
return bucket.delete_key(self.object_name, headers, version_id,
mfa_token)
|
(self, validate=False, headers=None, version_id=None, mfa_token=None)
|
5,920 |
boto.storage_uri
|
exists
|
Returns True if the file exists or False if it doesn't
|
def exists(self, _headers_not_used=None):
"""Returns True if the file exists or False if it doesn't"""
# The _headers_not_used parameter is ignored. It is only there to ensure
# that this method's signature is identical to the exists method on the
# BucketStorageUri class.
return os.path.exists(self.object_name)
|
(self, _headers_not_used=None)
|
5,926 |
boto.storage_uri
|
get_key
| null |
def get_key(self, validate=False, headers=None, version_id=None):
self._check_object_uri('get_key')
bucket = self.get_bucket(validate, headers)
key = bucket.get_key(self.object_name, headers, version_id)
self.check_response(key, 'key', self.uri)
return key
|
(self, validate=False, headers=None, version_id=None)
|
5,928 |
boto.storage_uri
|
is_cloud_uri
|
Returns True if this URI names a bucket or object.
|
def is_cloud_uri(self):
"""Returns True if this URI names a bucket or object."""
return False
|
(self)
|
5,929 |
boto.storage_uri
|
is_file_uri
|
Returns True if this URI names a file or directory.
|
def is_file_uri(self):
"""Returns True if this URI names a file or directory."""
return True
|
(self)
|
5,930 |
boto.storage_uri
|
is_stream
|
Returns True if this URI represents input/output stream.
|
def is_stream(self):
"""Returns True if this URI represents input/output stream.
"""
return bool(self.stream)
|
(self)
|
5,932 |
boto.storage_uri
|
names_bucket
|
Returns True if this URI names a bucket.
|
def names_bucket(self):
"""Returns True if this URI names a bucket."""
return False
|
(self)
|
5,933 |
boto.storage_uri
|
names_container
|
Returns True if this URI names a directory or bucket.
|
def names_container(self):
"""Returns True if this URI names a directory or bucket."""
return self.names_directory()
|
(self)
|
5,934 |
boto.storage_uri
|
names_directory
|
Returns True if this URI names a directory.
|
def names_directory(self):
"""Returns True if this URI names a directory."""
if self.stream:
return False
return os.path.isdir(self.object_name)
|
(self)
|
5,935 |
boto.storage_uri
|
names_file
|
Returns True if this URI names a file.
|
def names_file(self):
"""Returns True if this URI names a file."""
return self.names_singleton()
|
(self)
|
5,936 |
boto.storage_uri
|
names_object
|
Returns True if this URI names an object.
|
def names_object(self):
"""Returns True if this URI names an object."""
return False
|
(self)
|
5,937 |
boto.storage_uri
|
names_provider
|
Returns True if this URI names a provider.
|
def names_provider(self):
"""Returns True if this URI names a provider."""
return False
|
(self)
|
5,938 |
boto.storage_uri
|
names_singleton
|
Returns True if this URI names a file (or stream) or object.
|
def names_singleton(self):
"""Returns True if this URI names a file (or stream) or object."""
return not self.names_container()
|
(self)
|
5,940 |
boto.exception
|
InvalidUriError
|
Exception raised when URI is invalid.
|
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
super(InvalidUriError, self).__init__(message)
self.message = message
|
(message)
|
5,941 |
boto.exception
|
__init__
| null |
def __init__(self, message):
super(InvalidUriError, self).__init__(message)
self.message = message
|
(self, message)
|
5,942 |
boto
|
NullHandler
| null |
class NullHandler(logging.Handler):
def emit(self, record):
pass
|
(level=0)
|
5,945 |
logging
|
_at_fork_reinit
| null |
def _at_fork_reinit(self):
self.lock._at_fork_reinit()
|
(self)
|
5,949 |
logging
|
createLock
|
Acquire a thread lock for serializing access to the underlying I/O.
|
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
self.lock = threading.RLock()
_register_at_fork_reinit_lock(self)
|
(self)
|
5,950 |
boto
|
emit
| null |
def emit(self, record):
pass
|
(self, record)
|
5,955 |
logging
|
handle
|
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
|
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
|
(self, record)
|
5,964 |
boto
|
connect_autoscale
|
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
:return: A connection to Amazon's Auto Scaling Service
:type use_block_device_types: bool
:param use_block_device_types: Specifies whether to return described Launch Configs with block device mappings containing
block device types, or a list of old style block device mappings (deprecated). This defaults to false for compatibility
with the old incorrect style.
|
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
:return: A connection to Amazon's Auto Scaling Service
:type use_block_device_types: bool
:param use_block_device_types: Specifies whether to return described Launch Configs with block device mappings containing
block device types, or a list of old style block device mappings (deprecated). This defaults to false for compatibility
with the old incorrect style.
"""
from boto.ec2.autoscale import AutoScaleConnection
return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
|
(aws_access_key_id=None, aws_secret_access_key=None, **kwargs)
|
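A hedged connection sketch; credentials come from the boto config or environment when the arguments are omitted:

```python
import boto

conn = boto.connect_autoscale()
for group in conn.get_all_groups():   # lists existing Auto Scaling groups
    print(group.name)
```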
5,965 |
boto
|
connect_awslambda
|
Connect to AWS Lambda
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.awslambda.layer1.AWSLambdaConnection`
:return: A connection to the AWS Lambda service
|
def connect_awslambda(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS Lambda
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.awslambda.layer1.AWSLambdaConnection`
:return: A connection to the AWS Lambda service
"""
from boto.awslambda.layer1 import AWSLambdaConnection
return AWSLambdaConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
|
(aws_access_key_id=None, aws_secret_access_key=None, **kwargs)
|
5,966 |
boto
|
connect_beanstalk
|
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.beanstalk.layer1.Layer1`
:return: A connection to Amazon's Elastic Beanstalk service
|
def connect_beanstalk(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.beanstalk.layer1.Layer1`
:return: A connection to Amazon's Elastic Beanstalk service
"""
from boto.beanstalk.layer1 import Layer1
return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
|
(aws_access_key_id=None, aws_secret_access_key=None, **kwargs)
|
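A similar sketch for the Beanstalk connection; again, credentials are assumed to be configured elsewhere:

```python
import boto

conn = boto.connect_beanstalk()
apps = conn.describe_applications()   # Layer1 call returning a JSON-style dict
print(apps)
```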