boto3 - Accessing AWS S3 with Python (02)

Boto3 official documentation:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html


Accessing AWS S3 with Python
AWS configuration
Accessing S3 requires an aws_access_key_id and an aws_secret_access_key. The boto3 API lets you pass these two values in directly,
but putting security-sensitive information in code is not a good choice. Instead, we configure them with awscli, which stores them in ~/.aws/credentials.
When boto3 accesses S3, it automatically reads the credentials from that file.
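
For reference, boto3 also accepts the keys as explicit arguments; a minimal sketch with placeholder values (discouraged outside quick local experiments):

import boto3
s3 = boto3.resource(
    's3',
    aws_access_key_id='YOUR_ACCESS_KEY',        # placeholder
    aws_secret_access_key='YOUR_SECRET_KEY',    # placeholder
    region_name='us-east-1',
)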

awscli can be installed with pip install awscli.
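
Alternatively, running aws configure prompts for the key, secret, and region interactively, and writes both files shown below for you:

aws configure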

------------------------------------------------------------------------------------------------------

# Prerequisite: configure the access key, secret key, and region

vim ~/.aws/credentials

[default]
aws_access_key_id = YOUR_ACCESS_KEY
aws_secret_access_key = YOUR_SECRET_KEY

vim ~/.aws/config

[default]
region = us-east-1

The code is as follows (to be continued):

import os
import boto3
from loguru import logger


"""

Boto3 官方文档:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html


使用Python访问AWS S3
AWS配置
访问S3需要aws_access_key_id和aws_secret_access_key。在boto3提供的API中,可以指定这两个值。
但是把安全相关的这些信息放到代码中并不是一个好的选择。所以,我们通过awscli配置,将其存放到~/.aws/credentials。
boto3的API在访问S3时,会自动从该文件中读取相关信息。

awscli可以通过pip install awscli进行安装。

——————————————————————————————————————————————————————————————————————————————————————————————————————

# 前置条件,配置access_key 和 access_secret 和 区域信息

vim ~/.aws/credentials

[default]
aws_access_key_id = YOUR_ACCESS_KEY
aws_secret_access_key = YOUR_SECRET_KEY


vim ~/.aws/config

[default]
region=your-us-east-1

"""

# S3 resource instance (credentials/region are read from ~/.aws/)
s3 = boto3.resource('s3')


def print_out_bucket_names():
    """
    List all buckets.
    """
    return list(s3.buckets.all())


def upload_files(bucket_name, path_local, path_s3):
    """
    Upload a file and log the outcome.
    """
    logger.info('Start uploading files.')

    if not upload_single_file(bucket_name, path_local, path_s3):
        logger.error('Upload files failed.')
        return

    logger.info('Upload files successful.')


def upload_single_file(bucket_name, src_local_path, path_s3):
    """
    Upload a single file via put_object (reads the whole file into memory).
    """
    try:
        with open(src_local_path, 'rb') as f:
            data = f.read()

        # Key: path of the object in S3, e.g. test-03/test.jpg
        s3.Bucket(bucket_name).put_object(Key=path_s3, Body=data)
    except Exception as e:
        logger.error(f'Upload data failed. | src: {src_local_path} | Exception: {e}')
        return False
    logger.info(f'Uploading file successful. | src: {src_local_path}')
    return True


def download_zip(bucket_name, path_s3, path_local):
    """
    Download an object and save it to a local file.
    """
    retry = 0
    while retry < 3:  # retry up to 3 times on failure
        logger.info('Start downloading files.')
        try:
            s3.Object(bucket_name, path_s3).download_file(path_local)
            file_size = os.path.getsize(path_local)
            logger.info(f'Downloading completed. | size: {round(file_size / 1048576, 2)} MB')
            break  # stop retrying once the download succeeds
        except Exception as e:
            logger.error(f'Download zip failed. | Exception: {e}')
            retry += 1

    if retry >= 3:
        logger.error('Download zip failed after max retry.')


# def delete_s3_zip(path_s3, file_name):
#     """
#     Delete: archive a copy under icache/ first, then delete the original.
#     (Note: copy_object/delete_object are client methods; with the resource
#     instance used in this script, the equivalent calls look like this.)
#     :param path_s3:
#     :param file_name:
#     :return:
#     """
#     try:
#         copy_source = {'Bucket': BUCKET_NAME, 'Key': path_s3}
#         s3.Object(BUCKET_NAME, 'icache/' + file_name).copy_from(CopySource=copy_source)
#         s3.Object(BUCKET_NAME, path_s3).delete()
#     except Exception as e:
#         logger.error(f'Delete s3 zip failed. | Exception: {e}')
#
#
def get_files_list(bucket_name, path_name=''):
    """
    List object keys under the given prefix.
    """
    logger.info('Start getting files from s3.')
    try:
        bucket = s3.Bucket(bucket_name)
        all_obj = bucket.objects.filter(Prefix=path_name)
    except Exception as e:
        logger.error(f'Get files list failed. | Exception: {e}')
        return

    file_name_list = []
    for obj in all_obj:
        logger.info(f"obj = {obj}")
        file_name_list.append(obj.key)

    logger.info('Get file list successful.')

    return file_name_list
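

# For very large buckets, the low-level client paginator is a common
# alternative; a minimal sketch (the client is created here ad hoc for
# illustration, the rest of this script keeps using the resource API):
def get_files_list_paginated(bucket_name, path_name=''):
    client = boto3.client('s3')
    paginator = client.get_paginator('list_objects_v2')
    keys = []
    for page in paginator.paginate(Bucket=bucket_name, Prefix=path_name):
        # 'Contents' is absent from pages that match no objects
        for item in page.get('Contents', []):
            keys.append(item['Key'])
    return keys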


def check_exists(bucket_name, path_name):
    """
    Check whether an object exists by scanning keys under the prefix.
    """
    bucket = s3.Bucket(bucket_name)
    for obj in bucket.objects.filter(Prefix=path_name):
        logger.info(f"--- obj = {obj}")
        if obj.key == path_name:
            return True
    return False
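

# A cheaper existence check issues a single HEAD request instead of listing;
# a sketch using Object.load(), which raises ClientError with code '404' when
# the key is missing (the helper name is ours, not part of the original script):
def check_exists_by_head(bucket_name, path_name):
    import botocore.exceptions
    try:
        s3.Object(bucket_name, path_name).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == '404':
            return False
        raise  # re-raise anything other than "not found"
    return True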


if __name__ == "__main__":
    # BUCKET_NAME = "aaa-aws-s3-test-x"  # bucket name
    BUCKET_NAME = "aaa-aws-s3-test-y"  # bucket name
    # TODO test list / upload / download / delete
    # List all buckets
    # bucket_list = print_out_bucket_names()
    # logger.info(f"len = {len(bucket_list)} | bucket_list = {bucket_list}")
    # logger.info(bucket_list[0].name)  # get the bucket name

    # List files under a prefix
    # file_list = get_files_list(BUCKET_NAME, path_name='test-01')
    # logger.info(f"len = {len(file_list)} | file_list = {file_list}")

    # Upload
    # path_local = '/Users/test/local_file_path/虎式03的副本.jpeg'
    # path_s3 = 'test-03/03.jpeg'
    # upload_files(BUCKET_NAME, path_local, path_s3)

    # TODO use upload_file
    # TODO use upload_fileobj
    # TODO use the put() method
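
    # The three TODOs above correspond to real boto3 calls; a hedged sketch,
    # reusing the placeholder path_local / path_s3 from the commented-out
    # upload step above (uncomment to try):
    #
    # upload_file streams from disk with managed multipart transfer:
    # s3.Bucket(BUCKET_NAME).upload_file(Filename=path_local, Key=path_s3)
    #
    # upload_fileobj accepts any file-like object opened in binary mode:
    # with open(path_local, 'rb') as f:
    #     s3.Bucket(BUCKET_NAME).upload_fileobj(f, Key=path_s3)
    #
    # put() writes bytes (or a file handle) directly on an Object:
    # s3.Object(BUCKET_NAME, path_s3).put(Body=b'raw bytes')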

    # Download
    # path_s3 = 'test-03/虎式03的副本.jpeg'
    # path_local = '/Users/test/download_from_s3/03.jpeg'
    # download_zip(BUCKET_NAME, path_s3, path_local)

    # TODO The object can also be downloaded into an in-memory buffer to avoid unnecessary disk I/O.
    # TODO To read from S3 as a stream, use the Object's get() method.
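
    # Sketches for the two TODOs above, using the standard io module
    # (uncomment to try; path_s3 reuses the placeholder defined above):
    #
    # download_fileobj fills any writable binary buffer:
    # import io
    # buf = io.BytesIO()
    # s3.Object(BUCKET_NAME, path_s3).download_fileobj(buf)
    # data = buf.getvalue()
    #
    # get() returns a dict whose 'Body' is a streaming file-like object:
    # stream = s3.Object(BUCKET_NAME, path_s3).get()['Body']
    # first_kb = stream.read(1024)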

    # Check existence
    # path_name = 'test-03/03.jpeg'
    # check_res = check_exists(BUCKET_NAME, path_name)
    # logger.info(f"check_res = {check_res}")

    # TODO delete
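
    # A minimal delete sketch for the TODO above (uncomment to try; this
    # permanently removes one object on an unversioned bucket):
    # s3.Object(BUCKET_NAME, path_name).delete()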

    """批量清除桶中的所有内容 - 慎用"""
    # S3 delete everything in `my-bucket`
    """s3.Bucket(BUCKET_NAME).objects.delete()"""


 
