diff --git a/CMakeLists.txt b/CMakeLists.txt index 21e09c5e47daff4844caf564a5348cd4da224405..d25f3eabae58b73381e4ec5b9764296d55d9d854 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,424 +1,81 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0. -# - -cmake_minimum_required(VERSION 3.13 FATAL_ERROR) - -option(LEGACY_BUILD "If enabled, the SDK will use 1.11.0 version of CMake files to build" ON) -if (LEGACY_BUILD) - message(WARNING "In 1.11 releases, we are releasing experimental alternative building mode." - "By setting -DLEGACY_MODE=OFF you can test our advances in modern CMake building and " - "provide early feedback. " - "The legacy support is set by default in 1.11, when you complete build updating scripts please " - "update the build flags as mentioned in README.md and set -DLEGACY_BUILD=OFF. " - "The legacy support will be removed at 1.12.0 release.") - - if (POLICY CMP0077) - cmake_policy(SET CMP0077 OLD) # CMP0077: option() honors normal variables. Introduced in 3.13 - endif () - - get_filename_component(AWS_NATIVE_SDK_ROOT "${CMAKE_CURRENT_SOURCE_DIR}" ABSOLUTE) - - # Cmake invocation variables: - # BUILD_ONLY - only build project identified by this variable, a semi-colon delimited list, if this is set we will build only the projects listed. Core will always be built as will its unit tests. - # Also if a high level client is specified then we will build its dependencies as well. If a project has tests, the tests will be built. - # ADD_CUSTOM_CLIENTS - semi-colon delimited list of format serviceName=,version=;serviceName2=,version= - # to use these arguments, you should add the api definition .normal.json file for your service to the api-description folder in the generator. - # NDK_DIR - directory where the android NDK is installed; if not set, the location will be read from the ANDROID_NDK environment variable - # CUSTOM_PLATFORM_DIR - directory where custom platform scripts, modules, and source resides - # AWS_SDK_ADDITIONAL_LIBRARIES - names of additional libraries to link into aws-cpp-sdk-core in order to support unusual/unanticipated linking setups (static curl against static-something-other-than-openssl for example) - - option(ENABLE_UNITY_BUILD "If enabled, the SDK will be built using a single unified .cpp file for each service library. Reduces the size of static library binaries on Windows and Linux" ON) - option(MINIMIZE_SIZE "If enabled, the SDK will be built via a unity aggregation process that results in smaller static libraries; additionally, release binaries will favor size optimizations over speed" OFF) - option(BUILD_SHARED_LIBS "If enabled, all aws sdk libraries will be build as shared objects; otherwise all Aws libraries will be built as static objects" ON) - option(FORCE_SHARED_CRT "If enabled, will unconditionally link the standard libraries in dynamically, otherwise the standard library will be linked in based on the BUILD_SHARED_LIBS setting" ON) - option(SIMPLE_INSTALL "If enabled, removes all the additional indirection (platform/cpu/config) in the bin and lib directories on the install step" ON) - option(USE_CRT_HTTP_CLIENT "If enabled, the common runtime HTTP client will be used, and the legacy systems such as WinHttp and libcurl will not be built or included" OFF) - option(NO_HTTP_CLIENT "If enabled, no platform-default http client will be included in the library. 
For the library to be used you will need to provide your own platform-specific implementation" OFF) - option(NO_ENCRYPTION "If enabled, no platform-default encryption will be included in the library. For the library to be used you will need to provide your own platform-specific implementations" OFF) - option(USE_IXML_HTTP_REQUEST_2 "If enabled on windows, the com object IXmlHttpRequest2 will be used for the http stack" OFF) - option(ENABLE_RTTI "Flag to enable/disable rtti within the library" ON) - option(ENABLE_TESTING "Flag to enable/disable building unit and integration tests" ON) - option(AUTORUN_UNIT_TESTS "Flag to enable/disable automatically run unit tests after building" ON) - option(ANDROID_BUILD_CURL "When building for Android, should curl be built as well" ON) - option(ANDROID_BUILD_OPENSSL "When building for Android, should Openssl be built as well" ON) - option(ANDROID_BUILD_ZLIB "When building for Android, should Zlib be built as well" ON) - option(FORCE_CURL "Forces usage of the Curl client rather than the default OS-specific api" OFF) - option(ENABLE_ADDRESS_SANITIZER "Flags to enable/disable Address Sanitizer for gcc or clang" OFF) - option(BYPASS_DEFAULT_PROXY "Bypass the machine's default proxy settings when using IXmlHttpRequest2" ON) - option(BUILD_DEPS "Build third-party dependencies" ON) - option(USE_OPENSSL "Set this if you want to use your system's OpenSSL 1.0.2/1.1.1 compatible libcrypto" ON) - option(ENABLE_CURL_LOGGING "If enabled, Curl's internal log will be piped to SDK's logger" ON) - option(ENABLE_HTTP_CLIENT_TESTING "If enabled, corresponding http client test suites will be built and run" OFF) - option(ENABLE_FUNCTIONAL_TESTING "If enabled, clients might be generated based on dummy models, and run functional tests as part of unit tests: aws-cpp-sdk-core-tests" OFF) - option(CUSTOM_MEMORY_MANAGEMENT "If set to ON, generates the sdk project files with custom memory management enabled, otherwise disables it" OFF) - option(REGENERATE_CLIENTS "If set to ON, all clients being built on this run will be regenerated from the api definitions, this option involves some setup of python, java 8+, and maven" OFF) - option(ENABLE_VIRTUAL_OPERATIONS "This option usually works with REGENERATE_CLIENTS. \ - If enabled when doing code generation, operation related functions in service clients will be marked as virtual. \ - If disabled when doing code generation, virtual will not be added to operation functions and service client class will be marked as final. \ - If disabled, SDK will add compiler flags '-ffunction-sections -fdata-sections' for gcc and clang when compiling. \ - You can utilize this feature to work with your linker to reduce binary size of your application on Unix platforms when doing static linking in Release mode." ON) - option(REGENERATE_DEFAULTS "If set to ON, defaults mode configuration will be regenerated from the JSON definitions, this option involves some setup of python, java 8+, and maven" OFF) - option(ENABLE_ZLIB_REQUEST_COMPRESSION "For services that support it, request content will be compressed. On by default if dependency available" ON) - option(DISABLE_INTERNAL_IMDSV1_CALLS "Disables IMDSv1 internal client calls" OFF) - option(BUILD_BENCHMARKS "Enables building the benchmark executable" OFF) - option(BUILD_OPTEL "Enables building the open telemetry implementation of tracing" OFF) - option(AWS_SDK_WARNINGS_ARE_ERRORS "Compiler warning is treated as an error. 
Try turning this off when observing errors on a new or uncommon compiler" ON) - option(USE_TLS_V1_2 "Set http client to enforce TLS 1.2" ON) - option(USE_TLS_V1_3 "Set http client to enforce TLS 1.3" OFF) - - set(AWS_USER_AGENT_CUSTOMIZATION "" CACHE STRING "User agent extension") - set(AWS_TEST_REGION "US_EAST_1" CACHE STRING "Region to target integration tests against") - set(AWS_AUTORUN_LD_LIBRARY_PATH CACHE STRING "Path to append into LD_LIBRARY_PATH for unit tests autorun by cmake. Set this if custom runtime libs are required for overridden dependencies.") - set(BUILD_ONLY "" CACHE STRING "A semi-colon delimited list of the projects to build") - set(CPP_STANDARD "11" CACHE STRING "Flag to upgrade the C++ standard used. The default is 11. The minimum is 11.") - - get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) - if (NOT ${is_multi_config}) - set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Release build by default.") - endif () - if (DISABLE_INTERNAL_IMDSV1_CALLS) - add_definitions(-DDISABLE_IMDSV1) - endif () - if (USE_TLS_V2) - add_definitions(-DENFORCE_TLS_V1_2) - endif () - if (USE_TLS_V3) - add_definitions(-DENFORCE_TLS_V1_3) - endif () - - #From https://stackoverflow.com/questions/18968979/how-to-get-colorized-output-with-cmake - if (NOT WIN32) - string(ASCII 27 Esc) - set(ColourReset "${Esc}[m") - set(ColourBold "${Esc}[1m") - set(Red "${Esc}[31m") - set(Green "${Esc}[32m") - set(Yellow "${Esc}[33m") - set(Blue "${Esc}[34m") - set(Magenta "${Esc}[35m") - set(Cyan "${Esc}[36m") - set(White "${Esc}[37m") - set(BoldRed "${Esc}[1;31m") - set(BoldGreen "${Esc}[1;32m") - set(BoldYellow "${Esc}[1;33m") - set(BoldBlue "${Esc}[1;34m") - set(BoldMagenta "${Esc}[1;35m") - set(BoldCyan "${Esc}[1;36m") - set(BoldWhite "${Esc}[1;37m") - endif () - - if (NOT USE_OPENSSL) - message(WARNING "Turning off USE_OPENSSL will install AWS-LC as replacement of OpenSSL in the system default directory. This is an experimental feature. Do not use if you have an OpenSSL installation in your system already.") - endif () - - # backwards compatibility with old command line params - if ("${STATIC_LINKING}" STREQUAL "1") - set(BUILD_SHARED_LIBS OFF) - endif () - - if (MINIMIZE_SIZE) - message(STATUS "MINIMIZE_SIZE enabled") - set(ENABLE_UNITY_BUILD ON) # MINIMIZE_SIZE always implies UNITY_BUILD - endif () - - set(PYTHON_CMD "python") - - # CMAKE_MODULE_PATH is a CMAKE variable. It contains a list of paths - # which could be used to search CMAKE modules by "include()" or "find_package()", but the default value is empty. - # Add ${CMAKE_INSTALL_LIBDIR}/cmake and ${CMAKE_PREFIX_PATH}/lib/cmake to search list - list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake") - set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") - string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") - list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) - - # include() will "load and run" cmake script - include(resolve_platform) - include(CMakePackageConfigHelpers) - - if (REGENERATE_CLIENTS AND NOT ENABLE_VIRTUAL_OPERATIONS) - if (PLATFORM_LINUX OR PLATFORM_APPLE) - Message(STATUS "${BoldYellow}You are regenerating service client's source code and is turning ENABLE_VIRTUAL_OPERATIONS off. 
If you are targeting smaller binary size, read description string of ENABLE_VIRTUAL_OPERATIONS.${ColourReset}") - endif () - endif () - - # use response files to prevent command-line-too-big errors for large libraries like iam - set(CMAKE_CXX_USE_RESPONSE_FILE_FOR_OBJECTS 1) - set(CMAKE_CXX_USE_RESPONSE_FILE_FOR_INCLUDES 1) - set(CMAKE_CXX_RESPONSE_FILE_LINK_FLAG "@") - - if (COMMAND apply_pre_project_platform_settings) - apply_pre_project_platform_settings() - endif () - - include(initialize_project_version) - - if (BUILD_SHARED_LIBS OR FORCE_SHARED_CRT) - set(STATIC_CRT OFF) - else () - set(STATIC_CRT ON) - endif () - - # Add Linker search paths to RPATH so as to fix the problem where some linkers can't find cross-compiled dependent libraries in customer paths when linking executables. - set(CMAKE_INSTALL_RPATH_USE_LINK_PATH true) - - # build the sdk targets - project("aws-cpp-sdk-all" VERSION "${PROJECT_VERSION}" LANGUAGES CXX) - - set(Python_ADDITIONAL_VERSIONS 3.7 3.8 3.9 3.10) - find_package(PythonInterp) - set(PYTHON3_CMD ${PYTHON_EXECUTABLE}) - - # ENABLE_ZLIB_REQUEST_COMPRESSION should be ON by default if ZLIB is available - if(ENABLE_ZLIB_REQUEST_COMPRESSION) - find_package(ZLIB QUIET) - if ( NOT ZLIB_FOUND) - set(ENABLE_ZLIB_REQUEST_COMPRESSION - OFF CACHE BOOL - "For services that support it, request content will be compressed. On by default if dependency available" - FORCE) - message(WARNING "ZLIB is not available, it will not be used to compress requests") - else() - #Passing the information that we want zlib request compression support to C++ - add_definitions("-DENABLED_ZLIB_REQUEST_COMPRESSION" "-DENABLED_REQUEST_COMPRESSION") - endif() - endif() - - if (UNIX AND NOT APPLE) - include(GNUInstallDirs) - elseif (NOT DEFINED CMAKE_INSTALL_LIBDIR) - set(CMAKE_INSTALL_LIBDIR "lib") - endif () - - if (DEFINED CMAKE_PREFIX_PATH) - file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) - endif () - - add_definitions("-DAWS_TEST_REGION=${AWS_TEST_REGION}") - if (NOT AWS_USER_AGENT_CUSTOMIZATION STREQUAL "") - message(STATUS "Adding user agent customization ${AWS_USER_AGENT_CUSTOMIZATION}") - add_definitions("-DAWS_USER_AGENT_CUSTOMIZATION=${AWS_USER_AGENT_CUSTOMIZATION}") - endif() - if (DEFINED CMAKE_INSTALL_PREFIX) - file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) - endif () - - # http client, encryption, zlib - include(external_dependencies) - include(build_external) - - if (COMMAND apply_post_project_platform_settings) - apply_post_project_platform_settings() - endif () - - set(CMAKE_CONFIGURATION_TYPES - Debug # Setup for easy debugging. No optimizations. - DebugOpt # An optimized version of Debug. - Release # Fully optimized, no debugging information. - RelWithDebInfo # A debuggable version of Release. - MinSizeRel # Like Release, but optimized for memory rather than speed. - ) - - # build third-party targets - if (BUILD_DEPS) - set(CMAKE_INSTALL_RPATH "$ORIGIN") - list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/crt/aws-crt-cpp/crt/aws-c-common/cmake") - - include(AwsFindPackage) - - set(IN_SOURCE_BUILD ON) - set(BUILD_TESTING_PREV ${BUILD_TESTING}) - set(BUILD_TESTING OFF CACHE BOOL "Disable all tests in dependencies.") - # TODO: Use same BUILD_SHARED_LIBS for Aws Common Runtime dependencies. - # libcurl and aws-sdk-cpp-core may link to different libcrypto, which leads to some issues for shared build. 
- if (ENABLE_OPENSSL_ENCRYPTION) - set(BUILD_SHARED_LIBS_PREV ${BUILD_SHARED_LIBS}) - set(BUILD_SHARED_LIBS OFF) - endif () - set(CRT_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS}) - add_subdirectory(crt/aws-crt-cpp) - set(BUILD_TESTING ${BUILD_TESTING_PREV}) - if (ENABLE_OPENSSL_ENCRYPTION) - set(BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS_PREV}) - endif () - else () - include(AwsFindPackage) - set(IN_SOURCE_BUILD OFF) - endif () - set(AWS_COMMON_RUNTIME_LIBS "aws-crt-cpp;aws-c-auth;aws-c-cal;aws-c-common;aws-c-compression;aws-c-event-stream;aws-c-http;aws-c-io;aws-c-mqtt;aws-c-s3;aws-checksums;aws-c-sdkutils") - - include(compiler_settings) - # Instead of calling functions/macros inside included cmake scripts, we should call them in our main CMakeList.txt - set_msvc_flags() - set_msvc_warnings() - - include(sdks) - - include(utilities) - - if (ENABLE_BCRYPT_ENCRYPTION) - set(CRYPTO_LIBS Bcrypt) - set(CRYPTO_LIBS_ABSTRACT_NAME Bcrypt) - elseif (ENABLE_OPENSSL_ENCRYPTION) - set(CRYPTO_LIBS ${OPENSSL_LIBRARIES} ${ZLIB_LIBRARIES}) - set(CRYPTO_LIBS_ABSTRACT_NAME crypto ssl z) - endif () - - if (ENABLE_CURL_CLIENT) - set(CLIENT_LIBS ${CURL_LIBRARIES}) - set(CLIENT_LIBS_ABSTRACT_NAME curl) - elseif (ENABLE_WINDOWS_CLIENT) - if (USE_IXML_HTTP_REQUEST_2) - set(CLIENT_LIBS msxml6 runtimeobject) - set(CLIENT_LIBS_ABSTRACT_NAME msxml6 runtimeobject) - if (BYPASS_DEFAULT_PROXY) - list(APPEND CLIENT_LIBS winhttp) - list(APPEND CLIENT_LIBS_ABSTRACT_NAME winhttp) - endif () - else () - set(CLIENT_LIBS Wininet winhttp) - set(CLIENT_LIBS_ABSTRACT_NAME Wininet winhttp) - endif () - endif () - - # setup user specified installation directory if any, regardless previous platform default settings - if (CMAKE_INSTALL_BINDIR) - set(BINARY_DIRECTORY "${CMAKE_INSTALL_BINDIR}") - endif () - - if (CMAKE_INSTALL_LIBDIR) - set(LIBRARY_DIRECTORY "${CMAKE_INSTALL_LIBDIR}") - endif () - - if (CMAKE_INSTALL_INCLUDEDIR) - set(INCLUDE_DIRECTORY "${CMAKE_INSTALL_INCLUDEDIR}") - endif () - - if (BUILD_SHARED_LIBS) - set(ARCHIVE_DIRECTORY "${BINARY_DIRECTORY}") - else () - set(ARCHIVE_DIRECTORY "${LIBRARY_DIRECTORY}") - endif () - - if (ENABLE_ADDRESS_SANITIZER) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -g -fno-omit-frame-pointer") - if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.1) - message(STATUS "adding libasan as static explicitly for GCC 7+") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libasan") - endif () - endif () - - include(CheckCXXSymbolExists) - - check_cxx_symbol_exists("pathconf" "unistd.h" HAS_PATHCONF) - if (HAS_PATHCONF) - add_definitions(-DHAS_PATHCONF) - endif () - - check_cxx_symbol_exists("umask" "sys/stat.h" HAS_UMASK) - if (HAS_UMASK) - add_definitions(-DHAS_UMASK) - endif () - - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0) - add_definitions(-DLEGACY_GCC) - endif () - - add_definitions("-DAWS_TEST_REGION=${AWS_TEST_REGION}") - - add_sdks() - - # for user friendly cmake usage - include(setup_cmake_find_module) - - # for generating make uninstall target - if (NOT TARGET uninstall) - ADD_CUSTOM_TARGET(uninstall "${CMAKE_COMMAND}" -P "${AWS_NATIVE_SDK_ROOT}/cmake/make_uninstall.cmake") - else () - ADD_CUSTOM_TARGET(uninstall-awssdk "${CMAKE_COMMAND}" -P "${AWS_NATIVE_SDK_ROOT}/cmake/make_uninstall.cmake") - endif () -else () # End of Legacy Build - # -- Preamble -- - message(STATUS "Building with new CMake scripts.") - string(CONCAT DESCRIPTION_STRING "The AWS SDK for C++ provides a modern C++ (standard version C++11 or 
later) " - "interface for Amazon Web Services (AWS).") - - list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake") - - find_package(Git QUIET) # Adding development helper tools as git_hash built when available. - - include(project_version) - obtain_project_version(SDK_PROJECT_VERSION aws-cpp-sdk_GIT_HASH) - - project("aws-cpp-sdk" - LANGUAGES CXX - VERSION ${SDK_PROJECT_VERSION} - DESCRIPTION ${DESCRIPTION_STRING} - HOMEPAGE_URL "https://docs.aws.amazon.com/sdk-for-cpp" - ) - include(CTest) - - # -- Project wide setup -- - # Setting C++ minimum requirements - set(CMAKE_CXX_STANDARD 11) - set(CMAKE_CXX_EXTENSIONS OFF) - set(CMAKE_CXX_STANDARD_REQUIRED ON) - - # Setting flags for telling compiler this is a non-legacy build - add_definitions(-DNON_LEGACY_BUILD) - - # Setting build to hide symbols in targets by default - set(CMAKE_CXX_VISIBILITY_PRESET hidden) - set(CMAKE_VISIBILITY_INLINES_HIDDEN YES) - - # Preventing writes to package registry by default - set(CMAKE_EXPORT_NO_PACKAGE_REGISTRY YES) - - # Validating config type and setting default if needed - get_property(is_multi_conf_build GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) - if (NOT is_multi_conf_build) - set(allowed_build_types Debug Release RelWithDebInfo MinSizeRel) - # cmake-gui helper - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "${allowed_build_types}") - if (NOT CMAKE_BUILD_TYPE) - message(STATUS "Setting build type to 'RelWithDebInfo' as none was specified.") - set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Choose the type of build." FORCE) - elseif (NOT CMAKE_BUILD_TYPE IN_LIST allowed_build_types) - message(FATAL_ERROR "Unknown build type: ${CMAKE_BUILD_TYPE}") - endif () - endif () - - # Options definition - option(BUILD_TESTING "If enabled, the SDK will include tests in the build" OFF) - - # Next to be included - # # -- Dependencies -- - # include(dependencies) - - # Configuring the encryption tools used - - # # -- main build targets -- - # add_subdirectory(src) - # add_subdirectory(generated) - - # -- Tests and packaging if running this as top project -- - # if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) - # # Testing Dependencies - # if (BUILD_TESTING) - # add_subdirectory(tests) - # endif () - # add_subdirectory(packaging) - # endif () - - # Adding integration tests build and run - # Adding end-points tests build and run - # Add support for static analysis - # Building client libraries. - # Doc generation review - # Add support support for old SDK build flags - # Add previously available options. - - message(WARNING "This is work in progress build script. No SDK is built so far." - "If you need to build the SDK, you need to use LEGACY_BUILD mode at this time. 
" - ) -endif () +cmake_minimum_required(VERSION 3.10) +project(aws-s3-c) +set(AWS_S3_C_SRC + aws-s3-c.cpp) + +set(AWS_S3_TEST + test_aws_s3.cpp) + +add_library(aws-s3-c SHARED ${AWS_S3_C_SRC}) + +target_include_directories(aws-s3-c PUBLIC ${PROJECT_SOURCE_DIR}/deps/include) + +if (CMAKE_SYSTEM_NAME MATCHES "Linux") + target_link_libraries(aws-s3-c PUBLIC -Wl,--start-group + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-crt-cpp.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-auth.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-common.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-sdkutils.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-cal.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-compression.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-checksums.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-io.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-s3.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-mqtt.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-http.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-c-event-stream.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-cpp-sdk-core.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libaws-cpp-sdk-s3.a + ${PROJECT_SOURCE_DIR}/deps/lib64/libs2n.a + -Wl,--end-group + ) + if (CURL_LIBRARY_PATH) + target_link_libraries(aws-s3-c PUBLIC ${CURL_LIBRARY_PATH}/libcurl.a) + else() + message(FATAL_ERROR "CURL_LIBRARY_PATH is needed") + endif() + + if (OPENSSL_LIBRARY_PATH) + target_link_libraries(aws-s3-c PUBLIC ${OPENSSL_LIBRARY_PATH}/libcrypto.so.1.1) + target_link_libraries(aws-s3-c PUBLIC ${OPENSSL_LIBRARY_PATH}/libssl.so.1.1) + else() + message(FATAL_ERROR "OPENSSL_LIBRARY_PATH is needed") + endif() + target_link_libraries(aws-s3-c PUBLIC z) + target_link_libraries(aws-s3-c PUBLIC m -pthread) +else() + target_link_libraries( aws-s3-c PUBLIC + ${PROJECT_SOURCE_DIR}/deps/lib/aws-crt-cpp.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-auth.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-common.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-sdkutils.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-cal.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-compression.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-checksums.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-io.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-s3.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-mqtt.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-http.lib + ${PROJECT_SOURCE_DIR}/deps/lib/aws-c-event-stream.lib + ${PROJECT_SOURCE_DIR}/deps/bin/aws-cpp-sdk-core.lib + ${PROJECT_SOURCE_DIR}/deps/bin/aws-cpp-sdk-s3.lib + wsock32 + version + Bcrypt + userenv + ws2_32 + Shlwapi + crypt32 + Secur32 + Ncrypt) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_WINDOWS_DLL_SEMANTICS") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_IMPORT_EXPORT") +endif() + +add_executable(ut_aws_s3 ${AWS_S3_TEST}) +target_link_libraries(ut_aws_s3 dl) +target_link_libraries(ut_aws_s3 aws-s3-c) + +SET(CMAKE_INSTALL_PREFIX ${PROJECT_SOURCE_DIR}/install) +install(FILES aws-s3-c.h DESTINATION ${CMAKE_INSTALL_PREFIX}/include) +install(TARGETS aws-s3-c LIBRARY DESTINATION lib) diff --git a/README.md b/README.md index aee00f3ea5a98cb7ff03f3d043a4d4a616d042c3..9872388e52e1462317ef0077021d4208d27aa9c0 100644 --- a/README.md +++ b/README.md @@ -1,155 +1,90 @@ -# AWS SDK for C++ -The AWS SDK for C++ provides a modern C++ (version C++ 11 or later) interface for Amazon Web Services (AWS). It is meant to be performant and fully functioning with low- and high-level SDKs, while minimizing dependencies and providing platform portability (Windows, OSX, Linux, and mobile). 
+# Version notes
-AWS SDK for C++ is in now in General Availability and recommended for production use. We invite our customers to join
-the development efforts by submitting pull requests and sending us feedback and ideas via GitHub Issues.
+   The s3-sdk-cpp version used is 1.11.140.
-## Version 1.11 is now Available!
+   Additional fix applied on top of that release:
+   https://github.com/aws/aws-sdk-cpp/commit/314c1d73f42194fb956b173744c08b514566efcc
-This release introduces a refactored Asynchronous API and restructures the File Hierarchy of the project source code. Additionally, the minimum required version of cmake was raised to 3.13.
-* Asynchronous API refactoring is a partially breaking, backward incompatible change: all client methods such as OperationAsync and OperationCallable are no longer virtual methods but instead are conditionally compiled template methods. Having these methods as templates reduces the total binary size of the SDK by 40%. Individual client binary size may vary. In addition, it reduces build time of the SDK by up to 50% (release, unity build, SDK clients only).
-  A code change may be required if your code inherits SDK’s Client classes and overrides the virtual async methods. Regular synchronous/blocking call methods are still available for override.
-  Code changes are not required and SDK API are backward compatible if virtual async methods were not overwritten before.
-* Scripts and IDE project files not using the provided cmake build infrastructure must be reviewed and updated to reflect source tree changes.
+# Building on Linux
-All CRT libraries are git submodules of SDK for C++. It requires changes in git syntax to get all source code.
-* New users: If you haven't downloaded the source code for SDK for C++, you can get all git submodules recursively by:
-  ```
-  git clone --recurse-submodules https://github.com/aws/aws-sdk-cpp
-  ```
-* Existing users: If you’ve already downloaded source code for SDK for C++, e.g. in directory `aws-sdk-cpp`, you can update the git submodule by:
-  ```
-  cd aws-sdk-cpp
-  git checkout main
-  git pull origin main
-  git submodule update --init --recursive
-  ```
-* Alternatively, if you downloaded the code bundle from GitHub website and have no installation of git, you can download all the dependencies running the `prefetch_crt_dependency.sh` script from the root of the repository. It will download bundles of all dependencies from github website using curl and expand them in the right locations.
+1. Build OpenSSL
+   - ./config '-Wl,-rpath,$(LIBRPATH)' --prefix="/var/lib/jenkins/openssl-OpenSSL_1_1_1q/install"
+   - make -j4
+   - make install
+2. Build curl
+   - ./configure CFLAGS=-fPIC --without-libpsl --without-brotli --without-zstd --with-ssl="/var/lib/jenkins/openssl-OpenSSL_1_1_1q/install" --prefix=/var/lib/jenkins/curl-7.84.0/install
+   - make -j4
+   - make install
+
+3. Build s3-sdk-cpp
+   - Download the aws-s3-sdk-c project.
+   - Extract the aws-sdk-cpp.1.11.140.tar.gz archive into the aws-s3-sdk-c directory.
+   - Edit build.sh and point the OpenSSL and curl install paths at the locations used above.
+   - Comment out ./prefetch_crt_dependency.sh.
+   - Run sh build.sh; if it fails, delete the build directory under aws-sdk-cpp first and retry.
-See Wiki page [Improving S3 Throughput with AWS SDK for CPP v1.9](https://github.com/aws/aws-sdk-cpp/wiki/Improving-S3-Throughput-with-AWS-SDK-for-CPP-v1.9) for more details, and create a new [issue](https://github.com/aws/aws-sdk-cpp/issues/new/choose) or [pull request](https://github.com/aws/aws-sdk-cpp/compare) if you have any feedback on this new version.
+   Note: on ARM the unit tests may fail to run; the test cases can be skipped there.
-## API Reference Docs
+4. Build aws-s3-sdk-c
+   - mkdir build
+   - cd build
+   - cmake -DCURL_LIBRARY_PATH=/var/lib/jenkins/curl-7.84.0/install/lib/ -DOPENSSL_LIBRARY_PATH=/var/lib/jenkins/openssl-OpenSSL_1_1_1q/install/lib/ ../
+   - make
-The AWS SDK C++ recently re-formatted their API docs to be more modularized for easier navigation. Please update any old bookmarks that you may have.
+5. Test
-The root index of the documents can be found at https://sdk.amazonaws.com/cpp/api/LATEST/index.html
+   Run ut_aws_s3; adjust the url, ak, sk and other settings in the test file to match the S3 environment under test.
+
+6. Package
-from here each service specific documentation can be found under the `modules` tab. Upon opening one of these, you will see the associated class list of the client, including a links to the `core` module.
+   tar -zcvf aws-s3-sdk-c-1.11.140.tar.gz include/ lib/
+   Adjust the package name accordingly for the arm and ppc platforms.
+
+# Building on Windows
-## Upgrade Your SDK to Get Latest Security Patches
-The AWS SDK for C++ has a dependency on cJSON. This dependency was updated to version 1.7.14 in the recent SDK updates. We would recommend to upgrade your SDK to version 1.9.67 for 1.9.x or 1.8.187 for 1.8.x. Thank @dkalinowski for reporting this issue: https://github.com/aws/aws-sdk-cpp/issues/1594
+1. Build curl
+   On Windows, follow winbuild/README in the curl source tree:
+   nmake /f Makefile.vc mode=dll SSL_PATH=D:\tmp\openssl\libcrypto-1.1.1L-win\ (the path of the crypto build)
-__Jump To:__
-* [Getting Started](#Getting-Started)
-* [Issues and Contributions](#issues-and-contributions)
-* [Getting Help](#Getting-Help)
-* [Using the SDK and Other Topics](#Using-the-SDK-and-Other-Topics)
+2. Build s3-sdk-cpp
-# Getting Started
+   - mkdir build
+   - cd build
+   - "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin\cmake.exe" ..\ -DCMAKE_BUILD_TYPE=Release -DBUILD_ONLY="s3" -DENABLE_UNITY_BUILD=OFF -DFORCE_SHARED_CRT=ON -DBUILD_SHARED_LIBS=OFF -DFORCE_CURL=ON -DCURL_INCLUDE_DIR='D:/tmp/curl-7.84.0/curl-7.84.0/builds/libcurl-7.84.0-win/include/' -DCURL_LIBRARY='D:/tmp/curl-7.84.0/curl-7.84.0/builds/libcurl-7.84.0-win/lib/libcurl.lib' -DCMAKE_INSTALL_PREFIX="D:\tmp\aws-s3-sdk-c-master\install\"
+   - "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\MSBuild\Current\Bin\MSBuild.exe" ALL_BUILD.vcxproj -p:Configuration=Release
+   - "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\MSBuild\Current\Bin\MSBuild.exe" INSTALL.vcxproj -p:Configuration=Release
+
+   Set CMAKE_INSTALL_PREFIX to the deps directory under aws-s3-sdk-c. Adjust the MSBuild.exe and other tool paths to your installation.
+
+   The dynamic-library build of the SDK is used here; the static libraries ran into link errors when building aws-s3-c.
-## Building the SDK:
+3. Build aws-s3-sdk-c
+   - "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin\cmake.exe" ..\
+   - "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\MSBuild\Current\Bin\MSBuild.exe" ALL_BUILD.vcxproj -p:Configuration=Release
+   - "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\MSBuild\Current\Bin\MSBuild.exe" INSTALL.vcxproj -p:Configuration=Release
-### Minimum Requirements:
-* Visual Studio 2015 or later
-* OR GNU Compiler Collection (GCC) 4.9 or later
-* OR Clang 3.3 or later
-* 4GB of RAM
-  * 4GB of RAM is required to build some of the larger clients. The SDK build may fail on EC2 instance types t2.micro, t2.small and other small instance types due to insufficient memory.
+4. Package
-### Building From Source:
+   Package aws-s3-c.dll together with the other AWS dynamic libraries.
-#### To create an **out-of-source build**:
-1. Install CMake and the relevant build tools for your platform. Ensure these are available in your executable path.
+5. Test
-2. Create your build directory. Replace with your build directory name:
+   Adjust the S3 configuration in test_aws_s3 to your own environment.
+   Run ut_aws_s3.exe.
-3. Build the project:
-
-   ```sh
-   cd 
-   cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_PREFIX_PATH= 
-   cmake --build . --config=Debug
-   cmake --install . --config=Debug
-   ```
+
+# Usage
-#### Other Dependencies:
-To compile in Linux, you must have the header files for libcurl, libopenssl. The packages are typically available in your package manager.
+Refer to the test case (test_aws_s3.cpp).
-Debian based Linux distributions example:
-  `sudo apt-get install libcurl-dev`
+
+# Notes
-RPM based Linux distributions example:
-  `sudo [yum|dnf|zypper] install libcurl-devel`
+
+1. The current CI build does not cover the Arm and loong platforms; when delivering packages for them, make sure the corresponding platform actually compiles.
+2. Mind the glibc version of the build environment: x86 and arm should be 2.17, loong is currently 2.28.
+
+# FAQ
-### Building for Android
-To build for Android, add `-DTARGET_ARCH=ANDROID` to your cmake command line. Currently we support Android APIs from 19 to 28 with Android NDK 19c and we are using build-in cmake toolchain file supplied by Android NDK, assuming you have the appropriate environment variables (ANDROID_NDK) set.
-
-##### Android on Windows
-Building for Android on Windows requires some additional setup. In particular, you will need to run cmake from a Visual Studio developer command prompt (2015 or higher). Additionally, you will need 'git' and 'patch' in your path. If you have git installed on a Windows system, then patch is likely found in a sibling directory (.../Git/usr/bin/). Once you've verified these requirements, your cmake command line will change slightly to use nmake:
-
-  ```sh
-  cmake -G "NMake Makefiles" `-DTARGET_ARCH=ANDROID` ..
-  ```
-
-Nmake builds targets in a serial fashion. To make things quicker, we recommend installing JOM as an alternative to nmake and then changing the cmake invocation to:
-
-  ```sh
-  cmake -G "NMake Makefiles JOM" `-DTARGET_ARCH=ANDROID` ..
-  ```
-
-### Building for Docker
-
-To build for Docker, ensure your container meets the [minimum requirements](#minimum-requirements). By default, Docker Desktop is set to use 2 GB runtime memory. We have provided [Dockerfiles](https://github.com/aws/aws-sdk-cpp/tree/master/CI/docker-file) as templates for building the SDK in a container.
-
-
-### Building and running an app on EC2
-Checkout this walk through on how to set up an environment and build the [AWS SDK for C++ on an EC2 instance](https://github.com/aws/aws-sdk-cpp/wiki/Building-the-SDK-from-source-on-EC2).
-
-### Building aws-sdk-cpp - Using vcpkg
-
-You can download and install aws-sdk-cpp using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
-
-    git clone https://github.com/Microsoft/vcpkg.git
-    cd vcpkg
-    ./bootstrap-vcpkg.sh
-    ./vcpkg integrate install
-    ./vcpkg install aws-sdk-cpp
-
-The aws-sdk-cpp port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
-
-# Issues and Contributions
-We welcome all kinds of contributions, check [this guideline](./CONTRIBUTING.md) to learn how you can contribute or report issues.
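+Before the specific items below: when chasing down failures it helps to wire up the logging and error callbacks declared in aws-s3-c.h. Note that the wrapper currently constructs its log system at the Fatal level (see CodLogSystem in aws-s3-c.cpp), so little is emitted unless that is changed. The sketch below is illustrative; the function names are not part of the library.
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include "aws-s3-c.h"
+
+/* CodLogSystem passes an already formatted statement in 'fmt' (no extra varargs). */
+static void myS3Log(void* ctx, AwsS3LogLevel level, const char* fmt, ...)
+{
+    (void)ctx;
+    fprintf(stderr, "[aws-s3][%d] %s", (int)level, fmt);
+}
+
+/* Receives the bucket name, the S3 error code cast to uint32_t, and the SDK error message. */
+static void myS3Error(void* ctx, const char* bucket, uint32_t errorType, const char* errorMsg)
+{
+    (void)ctx;
+    fprintf(stderr, "[aws-s3][%s] error %u: %s\n", bucket, errorType, errorMsg);
+}
+
+static AwsS3Env* initWithDiagnostics(void)
+{
+    AwsS3EnvOptions options;
+    memset(&options, 0, sizeof(options));
+    options.logLevel = S3_LOG_LEVEL_ERROR;
+    options.logger.logFunc = myS3Log;
+    options.errorSetter.setFunc = myS3Error;
+    return awsS3InitEnv(options);
+}
+```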
- -# Maintenance and support for SDK major versions - -For information about maintenance and support for SDK major versions and our underlying dependencies, see the following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide - -* [AWS SDKs and Tools Maintenance Policy](https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html) -* [AWS SDKs and Tools Version Support Matrix](https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html) - - -# Getting Help - -The best way to interact with our team is through GitHub. You can [open an issue](https://github.com/aws/aws-sdk-cpp/issues/new/choose) and choose from one of our templates for guidance, bug reports, or feature requests. - -You may also find help on community resources such as [StackOverFlow](https://stackoverflow.com/) with the tag [#aws-sdk-cpp](https://stackoverflow.com/questions/tagged/aws-sdk-cpp). If you have a support plan with [AWS Support](https://aws.amazon.com/premiumsupport/), you can also create a new support case. - -Please make sure to check out our resources too before opening an issue: -* Our [Developer Guide](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/welcome.html) and [API reference](http://sdk.amazonaws.com/cpp/api/LATEST/index.html) -* Our [Changelog](./CHANGELOG.md) for recent breaking changes. -* Our [Contribute](./CONTRIBUTING.md) guide. -* Our [samples repo](https://github.com/awsdocs/aws-doc-sdk-examples/tree/master/cpp). - - -# Using the SDK and Other Topics -* [Using the SDK](./docs/SDK_usage_guide.md) -* [CMake Parameters](./docs/CMake_Parameters.md) -* [Credentials Providers](./docs/Credentials_Providers.md) -* [Client Configuration Parameters](./docs/ClientConfiguration_Parameters.md) -* [Service Client](./docs/Service_Client.md) -* [Memory Management](./docs/Memory_Management.md) -* [Advanced Topics](./docs/Advanced_topics.md) -* [Add as CMake external project](./docs/CMake_External_Project.md) -* [Coding Standards](./docs/CODING_STANDARDS.md) -* [License](./LICENSE) -* [Code of Conduct](./CODE_OF_CONDUCT.md) +1. 
curl build error: implicit declaration of function 'sched_yield'
+   This shows up when building curl on loong/PPC; see the following for a workaround:
+   https://blog.csdn.net/wquasdf/article/details/126028387
+
diff --git a/aws-s3-c.cpp b/aws-s3-c.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..01e4f9dde0c2110f2519d8afe6eb6b2f3477aa14
--- /dev/null
+++ b/aws-s3-c.cpp
@@ -0,0 +1,638 @@
+#include "aws-s3-c.h"
+#include <vector>
+#include <aws/core/Aws.h>
+#include <aws/core/auth/AWSCredentials.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/utils/logging/FormattedLogSystem.h>
+#include <aws/core/utils/logging/DefaultCRTLogSystem.h>
+#include <aws/core/utils/memory/AWSMemory.h>
+#include <aws/core/utils/memory/MemorySystemInterface.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+#include <aws/s3/S3Client.h>
+#include <aws/s3/model/PutObjectRequest.h>
+#include <aws/s3/model/GetObjectRequest.h>
+#include <aws/s3/model/DeleteObjectRequest.h>
+#include <aws/s3/model/DeleteObjectsRequest.h>
+#include <aws/s3/model/HeadObjectRequest.h>
+#include <aws/s3/model/ListObjectsV2Request.h>
+#include <aws/s3/model/CreateMultipartUploadRequest.h>
+#include <aws/s3/model/UploadPartRequest.h>
+#include <aws/s3/model/CompleteMultipartUploadRequest.h>
+
+#if defined(_MSC_VER)
+#pragma comment(lib, "../deps/bin/aws-cpp-sdk-core.lib")
+#pragma comment(lib, "../deps/bin/aws-cpp-sdk-s3.lib")
+#endif
+
+Aws::Utils::Logging::LogLevel logLevelCodToAws(AwsS3LogLevel logLevel);
+
+#define S3_MAX_LIST_OBJECTS 256
+#define S3_MAX_SINGLE_PART_SIZE (8 * 1024 * 1024)
+#define S3_MIN_UPLOAD_PART_SIZE (8 * 1024 * 1024)
+#define S3_FILENAME_BUFFER_SIZE
+
+AwsS3Loger gS3Logger = {0};
+AwsS3ErrorSetter gS3ErrorSetter = {0};
+
+namespace libs3
+{
+class CodMemoryManager : public Aws::Utils::Memory::MemorySystemInterface
+{
+  public:
+    CodMemoryManager() {}
+    CodMemoryManager(AwsS3MemAllocator* allocator) { s3Allocator = allocator; }
+    virtual ~CodMemoryManager() {}
+    virtual void Begin() override{};
+    virtual void End() override{};
+    virtual void* AllocateMemory(std::size_t blockSize, std::size_t alignment,
+                                 const char* allocationTag = nullptr) override
+    {
+        return s3Allocator->malloc(s3Allocator->ctx, blockSize, (uint32_t)alignment);
+    }
+
+    virtual void FreeMemory(void* memoryPtr) override { s3Allocator->free(s3Allocator->ctx, memoryPtr); }
+
+  private:
+    AwsS3MemAllocator* s3Allocator;
+};
+
+class AwsS3Env
+{
+  public:
+    AwsS3Env(AwsS3EnvOptions& option);
+    ~AwsS3Env();
+    AwsS3EnvOptions& getOption() { return instanceOption; }
+
+  private:
+    AwsS3EnvOptions instanceOption;
+    Aws::SDKOptions options;
+    CodMemoryManager memoryManager;
+};
+
+class Handle
+{
+  public:
+    Handle(){};
+    virtual ~Handle(){};
+    virtual AwsS3Result Put(const char* object, const char* buf, uint64_t size) = 0;
+    virtual AwsS3Result Get(const char* object, uint64_t offset, uint64_t len, char* buf, uint64_t* actualSize) = 0;
+    virtual AwsS3Result Delete(const char* object) = 0;
+    virtual AwsS3Result Head(const char* object, uint64_t* size) = 0;
+    virtual AwsS3Result CreateDir(const char* prefix) = 0;
+    virtual AwsS3Result DeleteWithPrefix(const char* prefix) = 0;
+    virtual AwsS3Result IsEmpty(const char* prefix, bool* isEmpty) = 0;
+};
+
+class SteamWriter
+{
+  public:
+    SteamWriter(libs3::Handle* handle) { this->handle = handle; };
+    virtual ~SteamWriter(){};
+    virtual AwsS3Result Write(const char* buf, uint64_t size) = 0;
+    virtual AwsS3Result Flush() = 0;
+
+  protected:
+    libs3::Handle* handle;
+};
+
+class AwsS3Handle : public Handle
+{
+  public:
+    AwsS3Handle(AwsS3BucketConf* conf, bool useHttps, bool useSSL);
+    virtual ~AwsS3Handle(){};
+    virtual AwsS3Result Put(const char* object, const char* buf, uint64_t size) override;
+    virtual AwsS3Result Get(const char* object, uint64_t offset, uint64_t len, char* buf,
+                            uint64_t* actualSize) override;
+    virtual AwsS3Result Delete(const char* object) override;
+    virtual AwsS3Result Head(const char* object, uint64_t* size) override;
+    virtual AwsS3Result CreateDir(const char* prefix) override { return S3_SUCCESS; };
+    virtual AwsS3Result DeleteWithPrefix(const char* prefix) override;
+    virtual AwsS3Result IsEmpty(const char* prefix, bool* isEmpty) override;
+    Aws::S3::S3Client& GetClient() { return client; }
+    Aws::String& GetBucket() { return bucket; }
+
+  private:
+    Aws::S3::S3Client client;
+    Aws::String bucket;
+};
+
+class AwsS3SteamWriter : SteamWriter
+{
+  public:
+    AwsS3SteamWriter(libs3::AwsS3Handle* handle, const char* name) : SteamWriter(handle)
+    {
+        this->fileName = name;
+        this->flushed = false;
+    };
+    virtual ~AwsS3SteamWriter(){};
+    virtual AwsS3Result Write(const char* buf, uint64_t size);
+    virtual AwsS3Result Flush();
+
+  private:
+    Aws::String fileName;
+    Aws::String writeBuffer;
+    Aws::String uploadId;
+    std::vector<Aws::String> etags;
+    bool flushed;
+};
+
+class CodLogSystem : public Aws::Utils::Logging::FormattedLogSystem
+{
+  public:
+    // todo: default too many error log, maybe to check internal action of s3
+    // CodLogSystem() : FormattedLogSystem(logLevelCodToAws(codGetLogLevel())) {}
+    CodLogSystem() : FormattedLogSystem(Aws::Utils::Logging::LogLevel::Fatal) {}
+    virtual ~CodLogSystem(){};
+    virtual void LogStream(Aws::Utils::Logging::LogLevel logLevel, const char* tag,
+                           const Aws::OStringStream& messageStream) override{
+        // todo :fill me
+    };
+
+  protected:
+    virtual void ProcessFormattedStatement(Aws::String&& statement) override;
+    virtual void Flush() override;
+};
+
+class CodCRTLogSystem : public Aws::Utils::Logging::DefaultCRTLogSystem
+{
+  public:
+    CodCRTLogSystem(Aws::Utils::Logging::LogLevel logLevel) : DefaultCRTLogSystem(logLevel) {}
+    virtual ~CodCRTLogSystem(){};
+
+  protected:
+    virtual void Log(Aws::Utils::Logging::LogLevel logLevel, const char* subjectName, const char* formatStr,
+                     va_list args) override;
+};
+
+} // namespace libs3
+
+AwsS3LogLevel logLevelAwsToCod(Aws::Utils::Logging::LogLevel logLevel)
+{
+    AwsS3LogLevel level;
+    switch (logLevel) {
+        case Aws::Utils::Logging::LogLevel::Fatal:
+            level = S3_LOG_LEVEL_FATAL;
+            break;
+        case Aws::Utils::Logging::LogLevel::Error:
+            level = S3_LOG_LEVEL_ERROR;
+            break;
+        case Aws::Utils::Logging::LogLevel::Warn:
+            level = S3_LOG_LEVEL_WARN;
+            break;
+        case Aws::Utils::Logging::LogLevel::Info:
+            level = S3_LOG_LEVEL_INFO;
+            break;
+        case Aws::Utils::Logging::LogLevel::Debug:
+            level = S3_LOG_LEVEL_DEBUG;
+            break;
+        case Aws::Utils::Logging::LogLevel::Trace:
+            level = S3_LOG_LEVEL_TRACE;
+            break;
+        default:
+            level = S3_LOG_OFF;
+            break;
+    }
+    return level;
+}
+
+Aws::Utils::Logging::LogLevel logLevelCodToAws(AwsS3LogLevel logLevel)
+{
+    Aws::Utils::Logging::LogLevel level;
+    switch (logLevel) {
+        case S3_LOG_LEVEL_FATAL:
+            level = Aws::Utils::Logging::LogLevel::Fatal;
+            break;
+        case S3_LOG_LEVEL_ERROR:
+            level = Aws::Utils::Logging::LogLevel::Error;
+            break;
+        case S3_LOG_LEVEL_WARN:
+            level = Aws::Utils::Logging::LogLevel::Warn;
+            break;
+        case S3_LOG_LEVEL_INFO:
+            level = Aws::Utils::Logging::LogLevel::Info;
+            break;
+        case S3_LOG_LEVEL_DEBUG:
+            level = Aws::Utils::Logging::LogLevel::Debug;
+            break;
+        case S3_LOG_LEVEL_TRACE:
+            level = Aws::Utils::Logging::LogLevel::Trace;
+            break;
+        default:
+            level = Aws::Utils::Logging::LogLevel::Off;
+            break;
+    }
+    return level;
+}
+
+void libs3::CodLogSystem::ProcessFormattedStatement(Aws::String&& statement)
+{
+    gS3Logger.logFunc(gS3Logger.ctx, logLevelAwsToCod(GetLogLevel()), statement.c_str());
+}
+
+void libs3::CodCRTLogSystem::Log(Aws::Utils::Logging::LogLevel logLevel, const char* subjectName, const char* formatStr,
+                                 va_list args)
+{
+    gS3Logger.logFunc(gS3Logger.ctx, logLevelAwsToCod(GetLogLevel()), formatStr, args);
+}
+
+void libs3::CodLogSystem::Flush() {}
+
+libs3::AwsS3Env::AwsS3Env(AwsS3EnvOptions& option)
+{
+    options.loggingOptions.logger_create_fn = []() { return Aws::MakeShared<CodLogSystem>("CodLogSystem"); };
+    // crt log is useless
+    // options.loggingOptions.crt_logger_create_fn = []() {
+    //     return Aws::MakeShared<CodCRTLogSystem>("CodCRTLogSystem", Aws::Utils::Logging::LogLevel::Info);
+    // };
+    if (option.withAllocator) {
+        memoryManager = libs3::CodMemoryManager(&option.allocator);
+        Aws::Utils::Memory::InitializeAWSMemorySystem(memoryManager);
+    }
+    // todo: check log level take affect or not
+    if (option.logLevel < S3_LOG_LEVEL_INFO) {
+        option.logLevel = S3_LOG_LEVEL_INFO;
+    }
+    options.loggingOptions.logLevel = logLevelCodToAws(S3_LOG_LEVEL_ERROR);
+    // options.cryptoOptions.initAndCleanupOpenSSL = true;
+    instanceOption = option;
+    Aws::InitAPI(options);
+    gS3Logger = option.logger;
+}
+
+libs3::AwsS3Env::~AwsS3Env()
+{
+    if (instanceOption.withAllocator) {
+        Aws::Utils::Memory::ShutdownAWSMemorySystem();
+    }
+    Aws::ShutdownAPI(options);
+}
+
+libs3::AwsS3Handle::AwsS3Handle(AwsS3BucketConf* conf, bool useHttps, bool useSSL) : Handle()
+{
+    Aws::Client::ClientConfiguration cfg;
+    cfg.endpointOverride = conf->endpoint;
+    cfg.scheme = useHttps ? Aws::Http::Scheme::HTTPS : Aws::Http::Scheme::HTTP;
+    cfg.verifySSL = useSSL;
+    cfg.region = conf->region;
+    bucket = conf->name;
+
+    Aws::Auth::AWSCredentials cred(conf->ak, conf->sk);
+    client = Aws::S3::S3Client(cred, cfg, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, false);
+}
+
+AwsS3Result libs3::AwsS3Handle::Put(const char* object, const char* buf, uint64_t size)
+{
+    Aws::S3::Model::PutObjectRequest request;
+
+    request.SetBucket(bucket);
+    request.SetKey(object);
+    request.SetContentLength(size);
+    // todo: optimize, no need transfer to string
+    const Aws::String data(buf, size);
+    request.SetBody(std::make_shared<Aws::StringStream>(data));
+    Aws::S3::Model::PutObjectOutcome ret = client.PutObject(request);
+    if (!ret.IsSuccess()) {
+        gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, GetBucket().c_str(), (uint32_t)ret.GetError().GetErrorType(),
+                               ret.GetError().GetMessage().c_str());
+        return S3_ERROR;
+    }
+
+    return S3_SUCCESS;
+}
+
+AwsS3Result libs3::AwsS3Handle::Get(const char* object, uint64_t offset, uint64_t len, char* buf, uint64_t* actualSize)
+{
+    Aws::S3::Model::GetObjectRequest request;
+    request.SetBucket(bucket);
+    request.SetKey(object);
+    if (len != 0xffffffffffffffff) {
+        request.SetRange("bytes=" + std::to_string(offset) + "-" + std::to_string(offset + len - 1));
+    }
+
+    Aws::S3::Model::GetObjectOutcome ret = client.GetObject(request);
+    if (!ret.IsSuccess()) {
+        gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, GetBucket().c_str(), (uint32_t)ret.GetError().GetErrorType(),
+                               ret.GetError().GetMessage().c_str());
+        return S3_ERROR;
+    }
+    *actualSize = ret.GetResult().GetContentLength();
+    auto& receive = ret.GetResultWithOwnership().GetBody();
+    receive.read((char*)buf, len);
+    return S3_SUCCESS;
+}
+
+AwsS3Result libs3::AwsS3Handle::Delete(const char* object)
+{
+    Aws::S3::Model::DeleteObjectRequest request;
+
+    request.WithBucket(bucket).WithKey(object);
+    Aws::S3::Model::DeleteObjectOutcome ret = client.DeleteObject(request);
+    if (!ret.IsSuccess()) {
+        gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, GetBucket().c_str(), (uint32_t)ret.GetError().GetErrorType(),
+                               ret.GetError().GetMessage().c_str());
+    }
+    return S3_SUCCESS;
+}
+
+AwsS3Result libs3::AwsS3Handle::Head(const char* object, uint64_t* size)
+{
+    Aws::S3::Model::HeadObjectRequest request;
+
+    request.WithBucket(bucket).WithKey(object);
+    Aws::S3::Model::HeadObjectOutcome ret = client.HeadObject(request);
+    if (!ret.IsSuccess()) {
+        gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, GetBucket().c_str(), (uint32_t)ret.GetError().GetErrorType(),
+                               ret.GetError().GetMessage().c_str());
+
+        return S3_ERROR;
+    }
+    *size = ret.GetResultWithOwnership().GetContentLength();
+    return S3_SUCCESS;
+}
+
+AwsS3Result libs3::AwsS3Handle::DeleteWithPrefix(const char* prefix)
+{
+    Aws::String path = prefix;
+    if (path.back() != '/') {
+        path.append("/");
+    }
+    Aws::S3::Model::ListObjectsV2Request request;
+    Aws::S3::Model::ListObjectsV2Result result;
+    request.WithBucket(bucket).WithPrefix(path);
+    request.SetMaxKeys(S3_MAX_LIST_OBJECTS);
+
+    bool dirExist = false;
+    Aws::S3::Model::DeleteObjectsRequest delete_request;
+    delete_request.SetBucket(bucket);
+    do {
+        auto outcome = client.ListObjectsV2(request);
+        if (!outcome.IsSuccess()) {
+            gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, GetBucket().c_str(), (uint32_t)outcome.GetError().GetErrorType(),
+                                   outcome.GetError().GetMessage().c_str());
+
+            return S3_ERROR;
+        }
+        result = outcome.GetResultWithOwnership();
+        dirExist |= !result.GetContents().empty();
+        Aws::Vector<Aws::S3::Model::ObjectIdentifier> objects;
+        objects.reserve(result.GetContents().size());
+        for (auto&& obj : result.GetContents()) {
+            Aws::S3::Model::ObjectIdentifier objectIdent;
+            objectIdent.SetKey(obj.GetKey());
+            objects.push_back(objectIdent);
+        }
+        if (!objects.empty()) {
+            Aws::S3::Model::Delete d;
+            d.WithObjects(std::move(objects)).WithQuiet(true);
+            delete_request.SetDelete(std::move(d));
+            auto delete_outcome = client.DeleteObjects(delete_request);
+            if (!delete_outcome.IsSuccess()) {
+                gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, GetBucket().c_str(),
+                                       (uint32_t)delete_outcome.GetError().GetErrorType(),
+                                       delete_outcome.GetError().GetMessage().c_str());
+
+                return S3_ERROR;
+            }
+            if (!delete_outcome.GetResult().GetErrors().empty()) {
+                gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, GetBucket().c_str(),
+                                       (uint32_t)Aws::S3::S3Errors::UNKNOWN, "multi delete partial failed");
+                return S3_ERROR;
+            }
+        }
+    } while (result.GetIsTruncated());
+
+    return Delete(path.c_str());
+}
+
+AwsS3Result libs3::AwsS3Handle::IsEmpty(const char* prefix, bool* isEmpty)
+{
+    Aws::String path = prefix;
+    if (path.back() != '/') {
+        path.append("/");
+    }
+    Aws::S3::Model::ListObjectsV2Request request;
+    Aws::S3::Model::ListObjectsV2Result result;
+    request.WithBucket(bucket).WithPrefix(path);
+    request.SetMaxKeys(S3_MAX_LIST_OBJECTS);
+
+    auto ret = client.ListObjectsV2(request);
+    if (!ret.IsSuccess()) {
+        gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, GetBucket().c_str(), (uint32_t)ret.GetError().GetErrorType(),
+                               ret.GetError().GetMessage().c_str());
+        return S3_ERROR;
+    }
+    result = ret.GetResultWithOwnership();
+    *isEmpty = !result.GetContents().size();
+    return S3_SUCCESS;
+}
+
+AwsS3Result libs3::AwsS3SteamWriter::Write(const char* buf, uint64_t size)
+{
+    if (flushed) {
+        gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, ((libs3::AwsS3Handle*)handle)->GetBucket().c_str(), 1,
+                               "write after flush is not permitted");
+        return S3_ERROR;
+    }
+    writeBuffer.append(buf, size);
+    if (uploadId.empty() && writeBuffer.size() > S3_MAX_SINGLE_PART_SIZE) {
+        Aws::S3::Model::CreateMultipartUploadRequest req;
+        req.SetBucket(((libs3::AwsS3Handle*)handle)->GetBucket());
+        req.SetKey(fileName);
+        Aws::S3::Model::CreateMultipartUploadOutcome ret =
+            ((libs3::AwsS3Handle*)handle)->GetClient().CreateMultipartUpload(req);
+        if (!ret.IsSuccess()) {
+            gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, ((libs3::AwsS3Handle*)handle)->GetBucket().c_str(),
+                                   (uint32_t)ret.GetError().GetErrorType(), ret.GetError().GetMessage().c_str());
+            return S3_ERROR;
+        }
+        uploadId = ret.GetResult().GetUploadId();
+    }
+
+    if (!uploadId.empty() && writeBuffer.size() >= S3_MIN_UPLOAD_PART_SIZE) {
+        Aws::S3::Model::UploadPartRequest req;
+        req.SetBucket(((libs3::AwsS3Handle*)handle)->GetBucket());
+        req.SetKey(fileName);
+        req.SetPartNumber(static_cast<int>(etags.size() + 1));
+        req.SetUploadId(uploadId);
+        req.SetContentLength(static_cast<long long>(writeBuffer.size()));
+        req.SetBody(std::make_shared<Aws::StringStream>(writeBuffer));
+        auto outcome = ((libs3::AwsS3Handle*)handle)->GetClient().UploadPart(req);
+        if (!outcome.IsSuccess()) {
+            gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, ((libs3::AwsS3Handle*)handle)->GetBucket().c_str(),
+                                   (uint32_t)outcome.GetError().GetErrorType(),
+                                   outcome.GetError().GetMessage().c_str());
+            return S3_ERROR;
+        }
+        etags.push_back(outcome.GetResult().GetETag());
+        writeBuffer.clear();
+    }
+    return S3_SUCCESS;
+}
+
+AwsS3Result libs3::AwsS3SteamWriter::Flush()
+{
+    if (uploadId.empty()) {
+        if (writeBuffer.empty()) {
+            flushed = true;
+            return S3_SUCCESS;
+        }
+        Aws::S3::Model::PutObjectRequest request;
+        request.SetBucket(((libs3::AwsS3Handle*)handle)->GetBucket());
+        request.SetKey(fileName);
+        request.SetContentLength(writeBuffer.size());
+        request.SetBody(std::make_shared<Aws::StringStream>(writeBuffer));
+        Aws::S3::Model::PutObjectOutcome ret = ((libs3::AwsS3Handle*)handle)->GetClient().PutObject(request);
+        if (!ret.IsSuccess()) {
+            gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, ((libs3::AwsS3Handle*)handle)->GetBucket().c_str(),
+                                   (uint32_t)ret.GetError().GetErrorType(), ret.GetError().GetMessage().c_str());
+            return S3_ERROR;
+        }
+        writeBuffer.clear();
+        flushed = true;
+        return S3_SUCCESS;
+    } else {
+        if (!writeBuffer.empty()) {
+            Aws::S3::Model::UploadPartRequest req;
+            req.SetBucket(((libs3::AwsS3Handle*)handle)->GetBucket());
+            req.SetKey(fileName);
+            req.SetPartNumber(static_cast<int>(etags.size() + 1));
+            req.SetUploadId(uploadId);
+            req.SetContentLength(static_cast<long long>(writeBuffer.size()));
+            req.SetBody(std::make_shared<Aws::StringStream>(writeBuffer));
+            auto outcome = ((libs3::AwsS3Handle*)handle)->GetClient().UploadPart(req);
+            if (!outcome.IsSuccess()) {
+                gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, ((libs3::AwsS3Handle*)handle)->GetBucket().c_str(),
+                                       (uint32_t)outcome.GetError().GetErrorType(),
+                                       outcome.GetError().GetMessage().c_str());
+                return S3_ERROR;
+            }
+            etags.push_back(outcome.GetResult().GetETag());
+        }
+
+        Aws::S3::Model::CompleteMultipartUploadRequest req;
+        req.SetBucket(((libs3::AwsS3Handle*)handle)->GetBucket());
+        req.SetKey(fileName);
+        req.SetUploadId(uploadId);
+        Aws::S3::Model::CompletedMultipartUpload multipart_upload;
+        for (int i = 0, sz = static_cast<int>(etags.size()); i < sz; ++i) {
+            Aws::S3::Model::CompletedPart part;
+            multipart_upload.AddParts(part.WithETag(etags[i]).WithPartNumber(i + 1));
+        }
+        req.SetMultipartUpload(multipart_upload);
+        auto outcome = ((libs3::AwsS3Handle*)handle)->GetClient().CompleteMultipartUpload(req);
+        if (!outcome.IsSuccess()) {
+            gS3ErrorSetter.setFunc(gS3ErrorSetter.ctx, ((libs3::AwsS3Handle*)handle)->GetBucket().c_str(),
+                                   (uint32_t)outcome.GetError().GetErrorType(),
+                                   outcome.GetError().GetMessage().c_str());
+            return S3_ERROR;
+        }
+        writeBuffer.clear();
+        uploadId.clear();
+        flushed = true;
+    }
+    return S3_SUCCESS;
+}
+
+static void AwsS3SetErrorDummy(void* ctx, const char* bucket, uint32_t errorType, const char* errorMsg)
+{
+    return;
+}
+
+AwsS3Env* awsS3InitEnv(AwsS3EnvOptions option)
+{
+    gS3ErrorSetter = option.errorSetter;
+    if (gS3ErrorSetter.setFunc == NULL) {
+        gS3ErrorSetter.setFunc = AwsS3SetErrorDummy;
+    }
+    gS3Logger = option.logger;
+    return reinterpret_cast<AwsS3Env*>(new libs3::AwsS3Env(option));
+}
+
+void aswS3ReleaseEnv(AwsS3Env* env)
+{
+    if (env != NULL) {
+        libs3::AwsS3Env* inst = (libs3::AwsS3Env*)env;
+        delete inst;
+    }
+}
+
+AwsS3Handle* awsS3OpenHandle(AwsS3BucketConf* conf, AwsS3HandleOptions* options)
+{
+    AwsS3Handle* handle = NULL;
+
+    handle = reinterpret_cast<AwsS3Handle*>(new libs3::AwsS3Handle(conf, options == NULL ? false : options->useHttps,
+                                                                   options == NULL ? false : options->useSSL));
+    return handle;
+}
+
+void awsS3CloseHandle(AwsS3Handle* handle)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    delete h;
+}
+
+AwsS3Result awsS3Put(AwsS3Handle* handle, const char* object, const char* buf, uint64_t size)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    return h->Put(object, buf, size);
+}
+
+AwsS3Result awsS3Get(AwsS3Handle* handle, const char* object, uint64_t offset, char* buf, uint64_t len,
+                     uint64_t* actualSize)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    *actualSize = 0;
+    return h->Get(object, offset, len, buf, actualSize);
+}
+
+AwsS3Result awsS3Delete(AwsS3Handle* handle, const char* object)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    return h->Delete(object);
+}
+
+AwsS3Result awsS3Head(AwsS3Handle* handle, const char* object, uint64_t* size)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    return h->Head(object, size);
+}
+
+AwsS3Result awsS3CreateDir(AwsS3Handle* handle, const char* dir)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    return h->CreateDir(dir);
+}
+
+AwsS3Result awsS3DeleteDir(AwsS3Handle* handle, const char* dir)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    return h->DeleteWithPrefix(dir);
+}
+
+AwsS3Result awsS3IsDirEmpty(AwsS3Handle* handle, const char* dir, bool* isEmpty)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    return (AwsS3Result)h->IsEmpty(dir, isEmpty);
+}
+
+AwsS3SteamWriter* awsS3OpenStreamWriter(AwsS3Handle* handle, const char* name)
+{
+    libs3::Handle* h = (libs3::Handle*)handle;
+    AwsS3SteamWriter* file = NULL;
+
+    file = reinterpret_cast<AwsS3SteamWriter*>(new libs3::AwsS3SteamWriter((libs3::AwsS3Handle*)h, name));
+    return file;
+}
+
+void awsS3CloseStreamWriter(AwsS3SteamWriter* steamWriter)
+{
+    libs3::SteamWriter* writer = (libs3::SteamWriter*)steamWriter;
+    delete writer;
+}
+
+AwsS3Result awsS3WriteStream(AwsS3SteamWriter* steamWriter, const char* buf, uint64_t size)
+{
+    libs3::SteamWriter* writer = (libs3::SteamWriter*)steamWriter;
+    return writer->Write(buf, size);
+}
+
+AwsS3Result awsS3FlushStream(AwsS3SteamWriter* steamWriter)
+{
+    libs3::SteamWriter* writer = (libs3::SteamWriter*)steamWriter;
+    return writer->Flush();
+}
diff --git a/aws-s3-c.h b/aws-s3-c.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0dad947f6842891ae857833f4796d30da3e6e80
--- /dev/null
+++ b/aws-s3-c.h
@@ -0,0 +1,108 @@
+#ifndef COD_AWS_S3_H
+#define COD_AWS_S3_H
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(_MSC_VER)
+#define AWS_S3_EXPORT __declspec(dllexport)
+#else
+#define AWS_S3_EXPORT
+#endif
+
+#define S3_MAX_KEY_SIZE 1024
+
+typedef struct StAwsS3BucketConf {
+    const char* endpoint;
+    const char* region;
+    const char* ak;
+    const char* sk;
+    const char* name;
+} AwsS3BucketConf;
+
+typedef struct StAwsS3HandleOptions {
+    bool useHttps;
+    bool useSSL;
+} AwsS3HandleOptions;
+
+typedef void* (*AwsS3MallocAlign)(void* ctx, size_t size, uint32_t alignSize);
+typedef void (*AwsS3Free)(void* ctx, void* ptr);
+
+typedef 
struct StAwsS3MemAllocator { + void* ctx; + AwsS3MallocAlign malloc; + AwsS3Free free; +} AwsS3MemAllocator; + +typedef enum EnS3LogLevel { + S3_LOG_ALL = 1, ///< Output all level + S3_LOG_LEVEL_TRACE = 1, ///< Trace + S3_LOG_LEVEL_DEBUG, ///< Debug + S3_LOG_LEVEL_INFO, ///< Information + S3_LOG_LEVEL_WARN, ///< Warning + S3_LOG_LEVEL_ERROR, ///< Error + S3_LOG_LEVEL_FATAL, ///< Fatal + S3_LOG_OFF ///< Turn off log +} AwsS3LogLevel; + +typedef void (*AwsS3LogFunc)(void* ctx, AwsS3LogLevel level, const char* fmt, ...); + +typedef struct StAwsS3Loger { + void* ctx; + AwsS3LogFunc logFunc; +} AwsS3Loger; + +typedef void (*AwsS3SetErrorFunc)(void* ctx, const char* bucket, uint32_t errorType, const char* errorMsg); +typedef struct StAwsS3ErrorSetter { + void* ctx; + AwsS3SetErrorFunc setFunc; +} AwsS3ErrorSetter; + +typedef struct StAwsS3EnvOptions { + AwsS3LogLevel logLevel; + bool withAllocator; + AwsS3MemAllocator allocator; + AwsS3Loger logger; + AwsS3ErrorSetter errorSetter; +} AwsS3EnvOptions; + +typedef void AwsS3Handle; +typedef void AwsS3Env; +typedef void AwsS3SteamWriter; + +typedef enum EnAwsS3Result { + S3_SUCCESS = 0, + S3_SUCCESS_WITH_INFO = 1, + S3_ERROR = -1, +} AwsS3Result; + +AWS_S3_EXPORT AwsS3Env* awsS3InitEnv(AwsS3EnvOptions option); +AWS_S3_EXPORT void aswS3ReleaseEnv(AwsS3Env* env); +AWS_S3_EXPORT AwsS3Handle* awsS3OpenHandle(AwsS3BucketConf* conf, AwsS3HandleOptions* options); +AWS_S3_EXPORT void awsS3CloseHandle(AwsS3Handle* handle); + +AWS_S3_EXPORT AwsS3Result awsS3CreateDir(AwsS3Handle* handle, const char* dir); +AWS_S3_EXPORT AwsS3Result awsS3DeleteDir(AwsS3Handle* handle, const char* dir); +AWS_S3_EXPORT AwsS3Result awsS3IsDirEmpty(AwsS3Handle* handle, const char* dir, bool* isEmpty); + +AWS_S3_EXPORT AwsS3Result awsS3Put(AwsS3Handle* handle, const char* object, const char* buf, uint64_t size); +AWS_S3_EXPORT AwsS3Result awsS3Get(AwsS3Handle* handle, const char* object, uint64_t offset, char* buf, uint64_t len, + uint64_t* actualSize); +AWS_S3_EXPORT AwsS3Result awsS3Delete(AwsS3Handle* handle, const char* object); +AWS_S3_EXPORT AwsS3Result awsS3Head(AwsS3Handle* handle, const char* object, uint64_t* size); + +AWS_S3_EXPORT AwsS3SteamWriter* awsS3OpenStreamWriter(AwsS3Handle* handle, const char* name); +AWS_S3_EXPORT void awsS3CloseStreamWriter(AwsS3SteamWriter* steamWriter); +AWS_S3_EXPORT AwsS3Result awsS3WriteStream(AwsS3SteamWriter* steamWriter, const char* buf, uint64_t size); +AWS_S3_EXPORT AwsS3Result awsS3FlushStream(AwsS3SteamWriter* steamWriter); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/build.sh b/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..f2cc2188e7e54173391483fa2b947a6968330efb --- /dev/null +++ b/build.sh @@ -0,0 +1,19 @@ +export OPEN_SSL_INSTALL_PATH=/var/lib/jenkins/openssl-OpenSSL_1_1_1q/install +export CURL_INSTALL_PATH=/var/lib/jenkins/curl-7.84.0/install + +mkdir -p deps +cd aws-sdk-cpp +#./prefetch_crt_dependency.sh + +mkdir build +cd build +unset LD_LIBRARY_PATH +export CMAKE_INCLUDE_PATH="${OPEN_SSL_INSTALL_PATH}/include/:${CURL_INSTALL_PATH}/include/" +export CMAKE_LIBRARY_PATH="${OPEN_SSL_INSTALL_PATH}/lib:${CURL_INSTALL_PATH}/lib" + +cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_ONLY="s3" -DENABLE_UNITY_BUILD=OFF -DFORCE_SHARED_CRT=OFF -DBUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX="../../deps" ../ + +make -j10 + +make install +cd ../../ diff --git a/test_aws_s3.cpp b/test_aws_s3.cpp new file mode 100644 index 0000000000000000000000000000000000000000..40166db48202c61bd681fdbe7ec12fc8d4e245cb 
--- /dev/null +++ b/test_aws_s3.cpp @@ -0,0 +1,93 @@ +#include "stdlib.h" +#include "stdio.h" +#include "aws-s3-c.h" +#include "string.h" + +int main() +{ + AwsS3EnvOptions options; + memset(&options, 0, sizeof(AwsS3EnvOptions)); + AwsS3Env* env = awsS3InitEnv(options); + + AwsS3BucketConf conf; + conf.endpoint = "192.168.18.103:9000"; + conf.region = "shenzhen"; + conf.ak = "test1234"; + conf.sk = "test1234"; + conf.name = "testbucket"; + + AwsS3HandleOptions handleOptions; + handleOptions.useHttps = false; + handleOptions.useSSL = false; + + AwsS3Handle* handle = awsS3OpenHandle(&conf, &handleOptions); + if (handle == NULL) { + printf("test failed\n"); + aswS3ReleaseEnv(env); + return -1; + } + + AwsS3Result ret = awsS3Put(handle, "testobj", "1234", 4); + if (ret != S3_SUCCESS) { + printf("test put failed\n"); + awsS3CloseHandle(handle); + aswS3ReleaseEnv(env); + return -1; + } + char readBuf[4] = {0}; + uint64_t actLen; + ret = awsS3Get(handle, "testobj", 0, readBuf, 4, &actLen); + if (ret != S3_SUCCESS || (actLen != 4 || memcmp(readBuf, "1234", 4) != 0)) { + printf("test get failed\n"); + awsS3CloseHandle(handle); + aswS3ReleaseEnv(env); + return -1; + } + printf("test put/get success\n"); + + AwsS3SteamWriter* writer = awsS3OpenStreamWriter(handle, "testMultiobj"); + char* buf = (char*)malloc(1024 * 1024); + memset(buf, '1', 1024 * 1024); + for (int i = 0; i < 9; i++) { + AwsS3Result ret = awsS3WriteStream(writer, buf, 1024 * 1024); + if (ret != S3_SUCCESS) { + printf("test multi put failed\n"); + awsS3CloseStreamWriter(writer); + awsS3CloseHandle(handle); + aswS3ReleaseEnv(env); + return -1; + } + } + + // allow multi flush + ret = awsS3FlushStream(writer); + if (ret != S3_SUCCESS) { + printf("test multi failed\n"); + awsS3CloseHandle(handle); + aswS3ReleaseEnv(env); + return -1; + } + + ret = awsS3FlushStream(writer); + if (ret != S3_SUCCESS) { + printf("flush multi failed\n"); + awsS3CloseHandle(handle); + aswS3ReleaseEnv(env); + return -1; + } + // write again, not allowed + ret = awsS3WriteStream(writer, buf, 1024 * 1024); + if (ret == S3_SUCCESS) { + printf("test multi put failed\n"); + awsS3CloseStreamWriter(writer); + awsS3CloseHandle(handle); + aswS3ReleaseEnv(env); + return -1; + } + + awsS3CloseStreamWriter(writer); + printf("test multi success\n"); + awsS3CloseHandle(handle); + aswS3ReleaseEnv(env); + return 0; +}
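A note on the optional memory hook, which the test program does not exercise: when AwsS3EnvOptions.withAllocator is set, CodMemoryManager in aws-s3-c.cpp routes all SDK allocations through the AwsS3MemAllocator callbacks declared in aws-s3-c.h. The sketch below shows one way to wire it up; it assumes a POSIX environment (posix_memalign; on Windows _aligned_malloc/_aligned_free would be needed instead) and the function names are illustrative, not part of the library.

```c
#include <stdlib.h>
#include <string.h>
#include "aws-s3-c.h"

/* The requested alignment may be small; posix_memalign needs at least sizeof(void*). */
static void* myAlloc(void* ctx, size_t size, uint32_t alignSize)
{
    void* p = NULL;
    size_t align = alignSize < sizeof(void*) ? sizeof(void*) : alignSize;
    (void)ctx;
    if (posix_memalign(&p, align, size) != 0) {
        return NULL;
    }
    return p;
}

static void myFree(void* ctx, void* ptr)
{
    (void)ctx;
    free(ptr);
}

static AwsS3Env* initWithAllocator(void)
{
    AwsS3EnvOptions options;
    memset(&options, 0, sizeof(options));
    options.withAllocator = true;      /* route SDK allocations through the callbacks below */
    options.allocator.ctx = NULL;
    options.allocator.malloc = myAlloc;
    options.allocator.free = myFree;
    return awsS3InitEnv(options);
}
```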