|
1 | 1 | # Copyright 2013. Amazon Web Services, Inc. All Rights Reserved.
|
2 |
| -# |
| 2 | +# |
3 | 3 | # Licensed under the Apache License, Version 2.0 (the "License");
|
4 | 4 | # you may not use this file except in compliance with the License.
|
5 | 5 | # You may obtain a copy of the License at
|
6 |
| -# |
| 6 | +# |
7 | 7 | # http://www.apache.org/licenses/LICENSE-2.0
|
8 |
| -# |
| 8 | +# |
9 | 9 | # Unless required by applicable law or agreed to in writing, software
|
10 | 10 | # distributed under the License is distributed on an "AS IS" BASIS,
|
11 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 | 12 | # See the License for the specific language governing permissions and
|
13 | 13 | # limitations under the License.
|
14 | 14 |
|
15 | 15 | # Import the SDK
|
16 |
# Import the SDK
import boto3
import uuid

# boto3 offers two different styles of API - Resource API (high-level) and
# Client API (low-level). Client API maps directly to the underlying RPC-style
# service operations (put_object, delete_object, etc.). Resource API provides
# an object-oriented abstraction on top (object.delete(), object.put()).
#
# While Resource APIs may help simplify your code and feel more intuitive to
# some, others may prefer the explicitness and control over network calls
# offered by Client APIs. For new AWS customers, we recommend getting started
# with Resource APIs, if available for the service being used. At the time of
# writing they're available for Amazon EC2, Amazon S3, Amazon DynamoDB, Amazon
# SQS, Amazon SNS, AWS IAM, Amazon Glacier, AWS OpsWorks, AWS CloudFormation,
# and Amazon CloudWatch. This sample will show both styles.
#
# First, we'll start with Client API for Amazon S3. Let's instantiate a new
# client object. With no parameters or configuration, boto3 will look for
# access keys in these places:
#
# 1. Environment variables (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY)
# 2. Credentials file (~/.aws/credentials or
#    C:\Users\USER_NAME\.aws\credentials)
# 3. AWS IAM role for Amazon EC2 instance
#    (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)

s3client = boto3.client('s3')

# Everything uploaded to Amazon S3 must belong to a bucket. These buckets are
# in the global namespace, and must have a unique name.
#
# For more information about bucket name restrictions, see:
# http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html

bucket_name = 'python-sdk-sample-{}'.format(uuid.uuid4())
print('Creating new bucket with name: {}'.format(bucket_name))

# Outside of us-east-1, CreateBucket requires an explicit LocationConstraint
# that matches the region the client is talking to; omitting it makes the
# call fail with an IllegalLocationConstraintError. us-east-1 is the one
# region where the constraint must NOT be sent.
region = s3client.meta.region_name
if region is None or region == 'us-east-1':
    s3client.create_bucket(Bucket=bucket_name)
else:
    s3client.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={'LocationConstraint': region})

# Now the bucket is created, and you'll find it in your list of buckets.

list_buckets_resp = s3client.list_buckets()
for bucket in list_buckets_resp['Buckets']:
    if bucket['Name'] == bucket_name:
        print('(Just created) --> {} - there since {}'.format(
            bucket['Name'], bucket['CreationDate']))

# Files in Amazon S3 are called "objects" and are stored in buckets. A
# specific object is referred to by its key (i.e., name) and holds data. Here,
# we create (put) a new object with the key "python_sample_key.txt" and
# content "Hello World!".

object_key = 'python_sample_key.txt'

print('Uploading some data to {} with key: {}'.format(
    bucket_name, object_key))
s3client.put_object(Bucket=bucket_name, Key=object_key, Body=b'Hello World!')

# Using the client, you can generate a pre-signed URL that you can give
# others to securely share the object without making it publicly accessible.
# By default, the generated URL will expire and no longer function after one
# hour. You can change the expiration to be from 1 second to 604800 seconds
# (1 week).

url = s3client.generate_presigned_url(
    'get_object', {'Bucket': bucket_name, 'Key': object_key})
print('\nTry this URL in your browser to download the object:')
print(url)

# Python 2/3 compatibility shim: on Python 2, raw_input is the
# non-evaluating line reader; on Python 3 it no longer exists (NameError)
# and the built-in input already behaves that way.
try:
    input = raw_input
except NameError:
    pass
input("\nPress enter to continue...")

# As we've seen in the create_bucket, list_buckets, and put_object methods,
# Client API requires you to explicitly specify all the input parameters for
# each operation. Most methods in the client class map to a single underlying
# API call to the AWS service - Amazon S3 in our case.
#
# Now that you got the hang of the Client API, let's take a look at Resource
# API, which provides resource objects that further abstract out the over-the-
# network API calls.
# Here, we'll instantiate and use 'bucket' or 'object' objects.

print('\nNow using Resource API')
# First, create the service resource object
s3resource = boto3.resource('s3')
# Now, the bucket object
bucket = s3resource.Bucket(bucket_name)
# Then, the object object
obj = bucket.Object(object_key)
print('Bucket name: {}'.format(bucket.name))
print('Object key: {}'.format(obj.key))
print('Object content length: {}'.format(obj.content_length))
print('Object body: {}'.format(obj.get()['Body'].read()))
print('Object last modified: {}'.format(obj.last_modified))

# Buckets cannot be deleted unless they're empty. Let's keep using the
# Resource API to delete everything. Here, we'll utilize the collection
# 'objects' and its batch action 'delete'. Batch actions return a list
# of responses, because boto3 may have to take multiple actions iteratively to
# complete the action.

print('\nDeleting all objects in bucket {}.'.format(bucket_name))
delete_responses = bucket.objects.delete()
for delete_response in delete_responses:
    for deleted in delete_response['Deleted']:
        print('\t Deleted: {}'.format(deleted['Key']))

# Now that the bucket is empty, let's delete the bucket.

print('\nDeleting the bucket.')
bucket.delete()

# For more details on what you can do with boto3 and Amazon S3, see the API
# reference page:
# https://boto3.readthedocs.org/en/latest/reference/services/s3.html
0 commit comments