From 91ab0cbae018bf644860778355b20566bce0f048 Mon Sep 17 00:00:00 2001
From: aws-sdk-cpp-automation

diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/ByoipCidr.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/ByoipCidr.h
   ///@{
   /**
- * <p>The state of the address pool.</p>
+ * <p>The state of the address range.</p> <ul> <li> <p> <code>advertised</code>:
+ * The address range is being advertised to the internet by Amazon Web
+ * Services.</p> </li> <li> <p> <code>deprovisioned</code>: The address range is
+ * deprovisioned.</p> </li> <li> <p> <code>failed-deprovision</code>: The request
+ * to deprovision the address range was unsuccessful. Ensure that all EIPs from the
+ * range have been deallocated and try again.</p> </li> <li> <p>
+ * <code>failed-provision</code>: The request to provision the address range was
+ * unsuccessful.</p> </li> <li> <p> <code>pending-deprovision</code>: You’ve
+ * submitted a request to deprovision an address range and it's pending.</p> </li>
+ * <li> <p> <code>pending-provision</code>: You’ve submitted a request to provision
+ * an address range and it's pending.</p> </li> <li> <p> <code>provisioned</code>:
+ * The address range is provisioned and can be advertised. The range is not
+ * currently advertised.</p> </li> <li> <p>
+ * <code>provisioned-not-publicly-advertisable</code>: The address range is
+ * provisioned and cannot be advertised.</p> </li> </ul>
   */
  inline const ByoipCidrState& GetState() const{ return m_state; }
  inline bool StateHasBeenSet() const { return m_stateHasBeenSet; }

diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/CreateVolumeRequest.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/CreateVolumeRequest.h
- * <p>The Amazon Resource Name (ARN) of the Outpost.</p>
+ * <p>The Amazon Resource Name (ARN) of the Outpost on which to create the
+ * volume.</p> <p>If you intend to use a volume with an instance running on an
+ * outpost, then you must create the volume on the same outpost as the instance.
+ * You can't use a volume created in an Amazon Web Services Region with an instance
+ * on an Amazon Web Services outpost, or the other way around.</p>

diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/DescribeVolumesRequest.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/DescribeVolumesRequest.h
- * <p>The volume IDs.</p>
+ * <p>The volume IDs.</p> <p>If not specified, then all volumes are included in the
+ * response.</p>
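The DescribeVolumes doc change above clarifies that omitting VolumeIds returns every volume. A minimal sketch of that call, assuming default credentials and region and trimming error handling to the essentials:

    #include <aws/core/Aws.h>
    #include <aws/ec2/EC2Client.h>
    #include <aws/ec2/model/DescribeVolumesRequest.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::EC2::EC2Client ec2;
            Aws::EC2::Model::DescribeVolumesRequest request; // no SetVolumeIds(): all volumes
            auto outcome = ec2.DescribeVolumes(request);
            if (outcome.IsSuccess())
            {
                for (const auto& volume : outcome.GetResult().GetVolumes())
                {
                    std::cout << volume.GetVolumeId() << std::endl;
                }
            }
            else
            {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }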
diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/CreateInstanceConnectEndpointRequest.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/CreateInstanceConnectEndpointRequest.h
index 8ab86a55b77..0a00292dcf7 100644
--- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/CreateInstanceConnectEndpointRequest.h
+++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/CreateInstanceConnectEndpointRequest.h
@@ -86,12 +86,11 @@ namespace Model
   ///@{
   /**
- * <p>Indicates whether your client's IP address is preserved as the source. The
- * value is <code>true</code> or <code>false</code>.</p> <ul> <li> <p>If
- * <code>true</code>, your client's IP address is used when you connect to a
- * resource.</p> </li> <li> <p>If <code>false</code>, the elastic network interface
- * IP address is used when you connect to a resource.</p> </li> </ul> <p>Default:
- * <code>true</code> </p>
+ * <p>Indicates whether the client IP address is preserved as the source. The
+ * following are the possible values.</p> <ul> <li> <p> <code>true</code> - Use the
+ * client IP address as the source.</p> </li> <li> <p> <code>false</code> - Use the
+ * network interface IP address as the source.</p> </li> </ul> <p>Default:
+ * <code>false</code> </p>
   */
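Since the default above is now false, preserving the client IP must be requested explicitly. A hedged sketch, assuming the SDK's usual generated setter names; the subnet ID is a placeholder:

    #include <aws/ec2/EC2Client.h>
    #include <aws/ec2/model/CreateInstanceConnectEndpointRequest.h>

    void CreateEndpointPreservingClientIp(Aws::EC2::EC2Client& ec2)
    {
        Aws::EC2::Model::CreateInstanceConnectEndpointRequest request;
        request.SetSubnetId("subnet-0abc1234");  // placeholder subnet
        request.SetPreserveClientIp(true);       // override the new default of false
        auto outcome = ec2.CreateInstanceConnectEndpoint(request);
        // outcome.IsSuccess() / outcome.GetError() as usual
    }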
diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceRequirements.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceRequirements.h
 * <p>The parameter accepts an
 * integer, which Amazon EC2 interprets as a percentage.</p> <p>If you set
- * <code>DesiredCapacityType</code> to <code>vcpu</code> or
+ * <code>TargetCapacityUnitType</code> to <code>vcpu</code> or
 * <code>memory-mib</code>, the price protection threshold is based on the per vCPU
 * or per memory price instead of the per instance price.</p> <p>Only one of
 * <code>SpotMaxPricePercentageOverLowestPrice</code> or
diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceRequirementsRequest.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceRequirementsRequest.h
index f448d690dee..09b26e60441 100644
--- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceRequirementsRequest.h
+++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceRequirementsRequest.h
@@ -533,7 +533,7 @@ namespace Model
 * selects instance types with your attributes, it will exclude instance types
 * whose price exceeds your specified threshold.</p> <p>The parameter accepts an
 * integer, which Amazon EC2 interprets as a percentage.</p> <p>If you set
- * <code>DesiredCapacityType</code> to <code>vcpu</code> or
+ * <code>TargetCapacityUnitType</code> to <code>vcpu</code> or
 * <code>memory-mib</code>, the price protection threshold is based on the per vCPU
 * or per memory price instead of the per instance price.</p> <p>Only one of
 * <code>SpotMaxPricePercentageOverLowestPrice</code> or
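The corrected doc keys the per-vCPU/per-memory interpretation off TargetCapacityUnitType, not DesiredCapacityType. A hedged sketch of the relevant wiring in an EC2 Fleet request; surrounding fleet configuration is omitted:

    #include <aws/ec2/model/CreateFleetRequest.h>
    #include <aws/ec2/model/InstanceRequirementsRequest.h>
    #include <aws/ec2/model/TargetCapacitySpecificationRequest.h>

    Aws::EC2::Model::CreateFleetRequest BuildFleetRequest()
    {
        Aws::EC2::Model::InstanceRequirementsRequest requirements;
        // 35% over the identified On-Demand price, interpreted per vCPU because
        // the target capacity unit below is vcpu.
        requirements.SetMaxSpotPriceAsPercentageOfOptimalOnDemandPrice(35);

        Aws::EC2::Model::TargetCapacitySpecificationRequest capacity;
        capacity.SetTargetCapacityUnitType(Aws::EC2::Model::TargetCapacityUnitType::vcpu);
        capacity.SetTotalTargetCapacity(64);

        Aws::EC2::Model::CreateFleetRequest fleet;
        fleet.SetTargetCapacitySpecification(capacity);
        // ... attach `requirements` via the fleet's launch template overrides ...
        return fleet;
    }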
diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/ModifyInstanceAttributeRequest.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/ModifyInstanceAttributeRequest.h
index e9b2efe7936..254fbc48d09 100644
--- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/ModifyInstanceAttributeRequest.h
+++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/ModifyInstanceAttributeRequest.h
@@ -265,10 +265,11 @@ namespace Model
///@{
/**
- * <p>Changes the instance's user data to the specified value. If you are using an
- * Amazon Web Services SDK or command line tool, base64-encoding is performed for
- * you, and you can load the text from a file. Otherwise, you must provide
- * base64-encoded text.</p>
+ * <p>Changes the instance's user data to the specified value. User data must be
+ * base64-encoded. Depending on the tool or SDK that you're using, the
+ * base64-encoding might be performed for you. For more information, see Work
+ * with instance user data.</p>
   */
  inline const BlobAttributeValue& GetUserData() const{ return m_userData; }
  inline bool UserDataHasBeenSet() const { return m_userDataHasBeenSet; }

diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/RunInstancesRequest.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/RunInstancesRequest.h
index 282ea92e9df..bf0222b9e5b 100644
--- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/RunInstancesRequest.h
+++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/RunInstancesRequest.h
@@ -307,13 +307,11 @@ namespace Model
   ///@{
   /**
- * <p>The user data script to make available to the instance. For more information,
- * see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User
- * Guide. If you are using a command line tool, base64-encoding is performed
- * for you, and you can load the text from a file. Otherwise, you must provide
- * base64-encoded text. User data is limited to 16 KB.</p>
+ * <p>The user data to make available to the instance. User data must be
+ * base64-encoded. Depending on the tool or SDK that you're using, the
+ * base64-encoding might be performed for you. For more information, see Work
+ * with instance user data.</p>
   */
  inline const Aws::String& GetUserData() const{ return m_userData; }
  inline bool UserDataHasBeenSet() const { return m_userDataHasBeenSet; }

diff --git a/generated/src/aws-cpp-sdk-ec2/source/model/HostTenancy.cpp b/generated/src/aws-cpp-sdk-ec2/source/model/HostTenancy.cpp
index 9ca6148867f..f1d44327bd9 100644
--- a/generated/src/aws-cpp-sdk-ec2/source/model/HostTenancy.cpp
+++ b/generated/src/aws-cpp-sdk-ec2/source/model/HostTenancy.cpp
@@ -20,6 +20,7 @@ namespace Aws
       namespace HostTenancyMapper
       {
+        static const int default__HASH = HashingUtils::HashString("default");
         static const int dedicated_HASH = HashingUtils::HashString("dedicated");
         static const int host_HASH = HashingUtils::HashString("host");

@@ -27,7 +28,11 @@ namespace Aws
         HostTenancy GetHostTenancyForName(const Aws::String& name)
         {
           int hashCode = HashingUtils::HashString(name.c_str());
-          if (hashCode == dedicated_HASH)
+          if (hashCode == default__HASH)
+          {
+            return HostTenancy::default_;
+          }
+          else if (hashCode == dedicated_HASH)
           {
             return HostTenancy::dedicated;
           }
@@ -51,6 +56,8 @@ namespace Aws
           {
           case HostTenancy::NOT_SET:
             return {};
+          case HostTenancy::default_:
+            return "default";
           case HostTenancy::dedicated:
             return "dedicated";
           case HostTenancy::host:
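The mapper above now accepts "default". A quick round-trip sketch of the two functions shown in the diff:

    #include <aws/ec2/model/HostTenancy.h>
    #include <cassert>

    void CheckDefaultTenancy()
    {
        using namespace Aws::EC2::Model;
        HostTenancy tenancy = HostTenancyMapper::GetHostTenancyForName("default");
        assert(tenancy == HostTenancy::default_);                              // newly mapped
        assert(HostTenancyMapper::GetNameForHostTenancy(tenancy) == "default"); // and back
    }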
diff --git a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/S3CrtClient.h b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/S3CrtClient.h
index a638804fc34..44c68b9a387 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/S3CrtClient.h
+++ b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/S3CrtClient.h
@@ -394,17 +394,16 @@ namespace Aws
 * <p>When the request is an HTTP 1.1
 * request, the response is chunk encoded. When the request is not an HTTP 1.1
 * request, the response would not contain the <code>Content-Length</code>. You
- * always need to read the entire response body to check if the copy succeeds. to
- * keep the connection alive while we copy the data.</p> <ul> <li> <p>If the copy
- * is successful, you receive a response with information about the copied
- * object.</p> </li> <li> <p>A copy request might return an error when Amazon S3
- * receives the copy request or while Amazon S3 is copying the files. A <code>200
- * OK</code> response can contain either a success or an error.</p> <ul> <li> <p>If
- * the error occurs before the copy action starts, you receive a standard Amazon S3
- * error.</p> </li> <li> <p>If the error occurs during the copy operation, the
- * error response is embedded in the <code>200 OK</code> response. For example, in
- * a cross-region copy, you may encounter throttling and receive a <code>200
- * OK</code> response. For more information, see
+ * always need to read the entire response body to check if the copy succeeds.</p>
+ * <ul> <li> <p>If the copy is successful, you receive a response with information
+ * about the copied object.</p> </li> <li> <p>A copy request might return an error
+ * when Amazon S3 receives the copy request or while Amazon S3 is copying the
+ * files. A <code>200 OK</code> response can contain either a success or an
+ * error.</p> <ul> <li> <p>If the error occurs before the copy action starts, you
+ * receive a standard Amazon S3 error.</p> </li> <li> <p>If the error occurs during
+ * the copy operation, the error response is embedded in the <code>200 OK</code>
+ * response. For example, in a cross-region copy, you may encounter throttling and
+ * receive a <code>200 OK</code> response. For more information, see
 * Resolve the Error 200 response when copying objects to Amazon S3. The <code>200
 * OK</code> status code means the copy was accepted, but it doesn't mean the copy
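Per the clarified CopyObject docs above, a 200 OK can still carry an embedded error, and the SDKs detect that and apply normal error handling, so checking the outcome is enough here. A hedged sketch with placeholder bucket and key names:

    #include <aws/s3-crt/S3CrtClient.h>
    #include <aws/s3-crt/model/CopyObjectRequest.h>
    #include <iostream>

    void CopyWithCheck(Aws::S3Crt::S3CrtClient& s3)
    {
        Aws::S3Crt::Model::CopyObjectRequest request;
        request.SetCopySource("source-bucket/source-key"); // placeholder
        request.SetBucket("dest-bucket");                  // placeholder
        request.SetKey("dest-key");                        // placeholder
        auto outcome = s3.CopyObject(request);
        if (!outcome.IsSuccess())
        {
            // Covers both an up-front error and one embedded in a 200 OK body.
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }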
@@ -1620,7 +1619,7 @@ namespace Aws
 * <p> <code>s3:DeleteObject</code> - To delete an object from a bucket,
 * you must always specify the <code>s3:DeleteObject</code> permission.</p>
 * </li> <li> <p> <code>s3:DeleteObjectVersion</code> - To delete a specific
- * version of an object from a versiong-enabled bucket, you must specify the
+ * version of an object from a versioning-enabled bucket, you must specify the
 * <code>s3:DeleteObjectVersion</code> permission.</p> </li> </ul>
 * required to use the multipart upload API, see
- * Multipart Upload and Permissions in the Amazon S3 User Guide.</p> <p>
- * <b>Directory bucket permissions</b> - You must have permissions in a bucket
- * policy or an IAM identity-based policy based on the source and destination
- * bucket types in an <code>UploadPartCopy</code> operation.</p> <ul> <li> <p>If
- * the source object that you want to copy is in a directory bucket, you must have
- * the <code>s3express:CreateSession</code> permission in the
- * <code>Action</code> element of a policy to read the object . By default, the
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions">Multipart
+ * upload API and permissions</a> in the Amazon S3 User Guide.</p> <p>
+ * <b>Directory bucket permissions</b> - You must have permissions in a
+ * bucket policy or an IAM identity-based policy based on the source and
+ * destination bucket types in an <code>UploadPartCopy</code> operation.</p>
+ * <ul> <li> <p>If the source object that you want to copy is in a directory bucket, you
+ * must have the <code>s3express:CreateSession</code> permission in the
+ * <code>Action</code> element of a policy to read the object. By default, the
 * session is in the <code>ReadWrite</code> mode. If you want to restrict the
 * access, you can explicitly set the <code>s3express:SessionMode</code> condition
 * key to <code>ReadOnly</code> on the copy source bucket.</p> </li> <li> <p>If the

diff --git a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/CreateSessionResult.h b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/CreateSessionResult.h
index 80a265781be..6844fff3f07 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/CreateSessionResult.h
+++ b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/CreateSessionResult.h
@@ -35,7 +35,7 @@ namespace Model
   ///@{
   /**
- * <p>The established temporary security credentials for the created session..</p>
+ * <p>The established temporary security credentials for the created session.</p>
   */
  inline const SessionCredentials& GetCredentials() const{ return m_credentials; }
  inline void SetCredentials(const SessionCredentials& value) { m_credentials = value; }

diff --git a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/HeadObjectRequest.h b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/HeadObjectRequest.h
index b564b237bc3..bde19d30a3d 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/HeadObjectRequest.h
+++ b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/HeadObjectRequest.h
@@ -207,6 +207,88 @@ namespace Model
     inline HeadObjectRequest& WithRange(const char* value) { SetRange(value); return *this;}
     ///@}

+    ///@{
+    /**
+     * <p>Sets the <code>Cache-Control</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseCacheControl() const{ return m_responseCacheControl; }
+    inline bool ResponseCacheControlHasBeenSet() const { return m_responseCacheControlHasBeenSet; }
+    inline void SetResponseCacheControl(const Aws::String& value) { m_responseCacheControlHasBeenSet = true; m_responseCacheControl = value; }
+    inline void SetResponseCacheControl(Aws::String&& value) { m_responseCacheControlHasBeenSet = true; m_responseCacheControl = std::move(value); }
+    inline void SetResponseCacheControl(const char* value) { m_responseCacheControlHasBeenSet = true; m_responseCacheControl.assign(value); }
+    inline HeadObjectRequest& WithResponseCacheControl(const Aws::String& value) { SetResponseCacheControl(value); return *this;}
+    inline HeadObjectRequest& WithResponseCacheControl(Aws::String&& value) { SetResponseCacheControl(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseCacheControl(const char* value) { SetResponseCacheControl(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Content-Disposition</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseContentDisposition() const{ return m_responseContentDisposition; }
+    inline bool ResponseContentDispositionHasBeenSet() const { return m_responseContentDispositionHasBeenSet; }
+    inline void SetResponseContentDisposition(const Aws::String& value) { m_responseContentDispositionHasBeenSet = true; m_responseContentDisposition = value; }
+    inline void SetResponseContentDisposition(Aws::String&& value) { m_responseContentDispositionHasBeenSet = true; m_responseContentDisposition = std::move(value); }
+    inline void SetResponseContentDisposition(const char* value) { m_responseContentDispositionHasBeenSet = true; m_responseContentDisposition.assign(value); }
+    inline HeadObjectRequest& WithResponseContentDisposition(const Aws::String& value) { SetResponseContentDisposition(value); return *this;}
+    inline HeadObjectRequest& WithResponseContentDisposition(Aws::String&& value) { SetResponseContentDisposition(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseContentDisposition(const char* value) { SetResponseContentDisposition(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Content-Encoding</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseContentEncoding() const{ return m_responseContentEncoding; }
+    inline bool ResponseContentEncodingHasBeenSet() const { return m_responseContentEncodingHasBeenSet; }
+    inline void SetResponseContentEncoding(const Aws::String& value) { m_responseContentEncodingHasBeenSet = true; m_responseContentEncoding = value; }
+    inline void SetResponseContentEncoding(Aws::String&& value) { m_responseContentEncodingHasBeenSet = true; m_responseContentEncoding = std::move(value); }
+    inline void SetResponseContentEncoding(const char* value) { m_responseContentEncodingHasBeenSet = true; m_responseContentEncoding.assign(value); }
+    inline HeadObjectRequest& WithResponseContentEncoding(const Aws::String& value) { SetResponseContentEncoding(value); return *this;}
+    inline HeadObjectRequest& WithResponseContentEncoding(Aws::String&& value) { SetResponseContentEncoding(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseContentEncoding(const char* value) { SetResponseContentEncoding(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Content-Language</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseContentLanguage() const{ return m_responseContentLanguage; }
+    inline bool ResponseContentLanguageHasBeenSet() const { return m_responseContentLanguageHasBeenSet; }
+    inline void SetResponseContentLanguage(const Aws::String& value) { m_responseContentLanguageHasBeenSet = true; m_responseContentLanguage = value; }
+    inline void SetResponseContentLanguage(Aws::String&& value) { m_responseContentLanguageHasBeenSet = true; m_responseContentLanguage = std::move(value); }
+    inline void SetResponseContentLanguage(const char* value) { m_responseContentLanguageHasBeenSet = true; m_responseContentLanguage.assign(value); }
+    inline HeadObjectRequest& WithResponseContentLanguage(const Aws::String& value) { SetResponseContentLanguage(value); return *this;}
+    inline HeadObjectRequest& WithResponseContentLanguage(Aws::String&& value) { SetResponseContentLanguage(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseContentLanguage(const char* value) { SetResponseContentLanguage(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Content-Type</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseContentType() const{ return m_responseContentType; }
+    inline bool ResponseContentTypeHasBeenSet() const { return m_responseContentTypeHasBeenSet; }
+    inline void SetResponseContentType(const Aws::String& value) { m_responseContentTypeHasBeenSet = true; m_responseContentType = value; }
+    inline void SetResponseContentType(Aws::String&& value) { m_responseContentTypeHasBeenSet = true; m_responseContentType = std::move(value); }
+    inline void SetResponseContentType(const char* value) { m_responseContentTypeHasBeenSet = true; m_responseContentType.assign(value); }
+    inline HeadObjectRequest& WithResponseContentType(const Aws::String& value) { SetResponseContentType(value); return *this;}
+    inline HeadObjectRequest& WithResponseContentType(Aws::String&& value) { SetResponseContentType(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseContentType(const char* value) { SetResponseContentType(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Expires</code> header of the response.</p>
+     */
+    inline const Aws::Utils::DateTime& GetResponseExpires() const{ return m_responseExpires; }
+    inline bool ResponseExpiresHasBeenSet() const { return m_responseExpiresHasBeenSet; }
+    inline void SetResponseExpires(const Aws::Utils::DateTime& value) { m_responseExpiresHasBeenSet = true; m_responseExpires = value; }
+    inline void SetResponseExpires(Aws::Utils::DateTime&& value) { m_responseExpiresHasBeenSet = true; m_responseExpires = std::move(value); }
+    inline HeadObjectRequest& WithResponseExpires(const Aws::Utils::DateTime& value) { SetResponseExpires(value); return *this;}
+    inline HeadObjectRequest& WithResponseExpires(Aws::Utils::DateTime&& value) { SetResponseExpires(std::move(value)); return *this;}
+    ///@}
+
 * <p>Version ID used to reference a specific version of the object.</p>

@@ -369,6 +451,24 @@ namespace Model
     Aws::String m_range;
     bool m_rangeHasBeenSet = false;

+    Aws::String m_responseCacheControl;
+    bool m_responseCacheControlHasBeenSet = false;
+
+    Aws::String m_responseContentDisposition;
+    bool m_responseContentDispositionHasBeenSet = false;
+
+    Aws::String m_responseContentEncoding;
+    bool m_responseContentEncodingHasBeenSet = false;
+
+    Aws::String m_responseContentLanguage;
+    bool m_responseContentLanguageHasBeenSet = false;
+
+    Aws::String m_responseContentType;
+    bool m_responseContentTypeHasBeenSet = false;
+
+    Aws::Utils::DateTime m_responseExpires;
+    bool m_responseExpiresHasBeenSet = false;
+
     Aws::String m_versionId;
     bool m_versionIdHasBeenSet = false;

diff --git a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/IndexDocument.h b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/IndexDocument.h
index dc7afe0c2b2..aaa2ebae3ad 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/IndexDocument.h
+++ b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/IndexDocument.h
@@ -40,12 +40,12 @@ namespace Model
   ///@{
   /**
 * <p>A suffix that is appended to a request that is for a directory on the website
- * endpoint (for example,if the suffix is index.html and you make a request to
- * samplebucket/images/ the data that is returned will be for the object with the
- * key name images/index.html) The suffix must not be empty and must not include a
- * slash character.</p> <p>Replacement must be made for object keys
- * containing special characters (such as carriage returns) when using XML
- * requests. For more information, see
+ * endpoint. (For example, if the suffix is <code>index.html</code> and you make a
+ * request to <code>samplebucket/images/</code>, the data that is returned will be
+ * for the object with the key name <code>images/index.html</code>.) The suffix
+ * must not be empty and must not include a slash character.</p> <p>Replacement
+ * must be made for object keys containing special characters (such as carriage
+ * returns) when using XML requests. For more information, see
 * XML related object key constraints.</p>
   */
diff --git a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/ListObjectsResult.h b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/ListObjectsResult.h
index 56edb13e2f1..e0c959d214b 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/ListObjectsResult.h
+++ b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/ListObjectsResult.h
@@ -175,8 +175,8 @@ namespace Model
   /**
 * <p>Encoding type used by Amazon S3 to encode object keys in the response. If
 * using <code>url</code>, non-ASCII characters used in an object's key name will
- * be URL encoded. For example, the object test_file(3).png will appear as
- * test_file%283%29.png.</p>
+ * be URL-encoded. For example, the object <code>test_file(3).png</code> will
+ * appear as <code>test_file%283%29.png</code>.</p>
   */
inline const EncodingType& GetEncodingType() const{ return m_encodingType; }
inline void SetEncodingType(const EncodingType& value) { m_encodingType = value; }
diff --git a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/ListObjectsV2Request.h b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/ListObjectsV2Request.h
index 712728186f7..076855ac9da 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/ListObjectsV2Request.h
+++ b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/ListObjectsV2Request.h
@@ -118,8 +118,8 @@ namespace Model
/**
 * <p>Encoding type used by Amazon S3 to encode object keys in the response. If
 * using <code>url</code>, non-ASCII characters used in an object's key name will
- * be URL encoded. For example, the object test_file(3).png will appear as
- * test_file%283%29.png.</p>
+ * be URL-encoded. For example, the object <code>test_file(3).png</code> will
+ * appear as <code>test_file%283%29.png</code>.</p>
inline const EncodingType& GetEncodingType() const{ return m_encodingType; }
inline bool EncodingTypeHasBeenSet() const { return m_encodingTypeHasBeenSet; }
diff --git a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/NoncurrentVersionExpiration.h b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/NoncurrentVersionExpiration.h
index cbdaf1e554c..344749718cc 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/NoncurrentVersionExpiration.h
+++ b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/NoncurrentVersionExpiration.h
@@ -56,9 +56,9 @@ namespace Model
///@{
/**
- * <p>Specifies how many newer noncurrent versions must exist before Amazon S3 can
- * perform the associated action on a given version. If there are this many more
- * recent noncurrent versions, Amazon S3 will take the associated action. For more
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. You can specify
+ * up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any
+ * additional noncurrent versions beyond the specified number to retain. For more
 * information about noncurrent versions, see Lifecycle
 * configuration elements in the Amazon S3 User Guide.</p>
   */

diff --git a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/NoncurrentVersionTransition.h b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/NoncurrentVersionTransition.h
index 4ecdca7ec28..2859d469ac5 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/NoncurrentVersionTransition.h
+++ b/generated/src/aws-cpp-sdk-s3-crt/include/aws/s3-crt/model/NoncurrentVersionTransition.h
@@ -75,10 +75,11 @@ namespace Model
   ///@{
   /**
- * <p>Specifies how many newer noncurrent versions must exist before Amazon S3 can
- * perform the associated action on a given version. If there are this many more
- * recent noncurrent versions, Amazon S3 will take the associated action. For more
- * information about noncurrent versions, see Lifecycle
- * configuration elements in the Amazon S3 User Guide.</p>
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain in the same
+ * storage class before transitioning objects. You can specify up to 100 noncurrent
+ * versions to retain. Amazon S3 will transition any additional noncurrent versions
+ * beyond the specified number to retain. For more information about noncurrent
+ * versions, see Lifecycle
+ * configuration elements in the Amazon S3 User Guide.</p>
   */
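The retention wording above corresponds to NewerNoncurrentVersions in a lifecycle rule. A hedged sketch that keeps five noncurrent versions and expires the rest; the bucket name is a placeholder and the rule filter is omitted:

    #include <aws/s3-crt/S3CrtClient.h>
    #include <aws/s3-crt/model/PutBucketLifecycleConfigurationRequest.h>
    #include <aws/s3-crt/model/BucketLifecycleConfiguration.h>
    #include <aws/s3-crt/model/LifecycleRule.h>
    #include <aws/s3-crt/model/NoncurrentVersionExpiration.h>

    void KeepFiveNoncurrentVersions(Aws::S3Crt::S3CrtClient& s3)
    {
        Aws::S3Crt::Model::NoncurrentVersionExpiration expiration;
        expiration.SetNewerNoncurrentVersions(5); // retain 5; older noncurrent versions expire
        expiration.SetNoncurrentDays(30);

        Aws::S3Crt::Model::LifecycleRule rule;
        rule.SetID("retain-five-noncurrent");
        rule.SetStatus(Aws::S3Crt::Model::ExpirationStatus::Enabled);
        rule.SetNoncurrentVersionExpiration(expiration);

        Aws::S3Crt::Model::BucketLifecycleConfiguration config;
        config.AddRules(rule);

        Aws::S3Crt::Model::PutBucketLifecycleConfigurationRequest request;
        request.SetBucket("my-versioned-bucket"); // placeholder
        request.SetLifecycleConfiguration(config);
        s3.PutBucketLifecycleConfiguration(request);
    }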
diff --git a/generated/src/aws-cpp-sdk-s3-crt/source/model/HeadObjectRequest.cpp b/generated/src/aws-cpp-sdk-s3-crt/source/model/HeadObjectRequest.cpp
index eeb06a0b13c..bb66bc01d22 100644
--- a/generated/src/aws-cpp-sdk-s3-crt/source/model/HeadObjectRequest.cpp
+++ b/generated/src/aws-cpp-sdk-s3-crt/source/model/HeadObjectRequest.cpp
@@ -24,6 +24,12 @@ HeadObjectRequest::HeadObjectRequest() :
     m_ifUnmodifiedSinceHasBeenSet(false),
     m_keyHasBeenSet(false),
     m_rangeHasBeenSet(false),
+    m_responseCacheControlHasBeenSet(false),
+    m_responseContentDispositionHasBeenSet(false),
+    m_responseContentEncodingHasBeenSet(false),
+    m_responseContentLanguageHasBeenSet(false),
+    m_responseContentTypeHasBeenSet(false),
+    m_responseExpiresHasBeenSet(false),
     m_versionIdHasBeenSet(false),
     m_sSECustomerAlgorithmHasBeenSet(false),
     m_sSECustomerKeyHasBeenSet(false),
@@ -47,6 +53,48 @@ Aws::String HeadObjectRequest::SerializePayload() const
 void HeadObjectRequest::AddQueryStringParameters(URI& uri) const
 {
     Aws::StringStream ss;
+    if(m_responseCacheControlHasBeenSet)
+    {
+      ss << m_responseCacheControl;
+      uri.AddQueryStringParameter("response-cache-control", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseContentDispositionHasBeenSet)
+    {
+      ss << m_responseContentDisposition;
+      uri.AddQueryStringParameter("response-content-disposition", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseContentEncodingHasBeenSet)
+    {
+      ss << m_responseContentEncoding;
+      uri.AddQueryStringParameter("response-content-encoding", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseContentLanguageHasBeenSet)
+    {
+      ss << m_responseContentLanguage;
+      uri.AddQueryStringParameter("response-content-language", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseContentTypeHasBeenSet)
+    {
+      ss << m_responseContentType;
+      uri.AddQueryStringParameter("response-content-type", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseExpiresHasBeenSet)
+    {
+      ss << m_responseExpires.ToGmtString(Aws::Utils::DateFormat::RFC822);
+      uri.AddQueryStringParameter("response-expires", ss.str());
+      ss.str("");
+    }
+
     if(m_versionIdHasBeenSet)
     {
       ss << m_versionId;
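The response-* query parameters serialized above let a HEAD request ask for overridden response headers. A hedged sketch, assuming the generated setters shown earlier; bucket and key are placeholders:

    #include <aws/s3-crt/S3CrtClient.h>
    #include <aws/s3-crt/model/HeadObjectRequest.h>

    void HeadWithOverrides(Aws::S3Crt::S3CrtClient& s3)
    {
        Aws::S3Crt::Model::HeadObjectRequest request;
        request.SetBucket("my-bucket");                     // placeholder
        request.SetKey("report.bin");                       // placeholder
        request.SetResponseContentType("application/json"); // response-content-type
        request.SetResponseCacheControl("no-cache");        // response-cache-control
        auto outcome = s3.HeadObject(request);
        // On success, the returned headers reflect the requested overrides.
    }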
diff --git a/generated/src/aws-cpp-sdk-s3/include/aws/s3/S3Client.h b/generated/src/aws-cpp-sdk-s3/include/aws/s3/S3Client.h
index 40e0a9720b6..527f0e9703d 100644
--- a/generated/src/aws-cpp-sdk-s3/include/aws/s3/S3Client.h
+++ b/generated/src/aws-cpp-sdk-s3/include/aws/s3/S3Client.h
@@ -405,17 +405,16 @@ namespace Aws
 * <p>When the request is an HTTP 1.1
 * request, the response is chunk encoded. When the request is not an HTTP 1.1
 * request, the response would not contain the <code>Content-Length</code>. You
- * always need to read the entire response body to check if the copy succeeds. to
- * keep the connection alive while we copy the data.</p> <ul> <li> <p>If the copy
- * is successful, you receive a response with information about the copied
- * object.</p> </li> <li> <p>A copy request might return an error when Amazon S3
- * receives the copy request or while Amazon S3 is copying the files. A <code>200
- * OK</code> response can contain either a success or an error.</p> <ul> <li> <p>If
- * the error occurs before the copy action starts, you receive a standard Amazon S3
- * error.</p> </li> <li> <p>If the error occurs during the copy operation, the
- * error response is embedded in the <code>200 OK</code> response. For example, in
- * a cross-region copy, you may encounter throttling and receive a <code>200
- * OK</code> response. For more information, see
+ * always need to read the entire response body to check if the copy succeeds.</p>
+ * <ul> <li> <p>If the copy is successful, you receive a response with information
+ * about the copied object.</p> </li> <li> <p>A copy request might return an error
+ * when Amazon S3 receives the copy request or while Amazon S3 is copying the
+ * files. A <code>200 OK</code> response can contain either a success or an
+ * error.</p> <ul> <li> <p>If the error occurs before the copy action starts, you
+ * receive a standard Amazon S3 error.</p> </li> <li> <p>If the error occurs during
+ * the copy operation, the error response is embedded in the <code>200 OK</code>
+ * response. For example, in a cross-region copy, you may encounter throttling and
+ * receive a <code>200 OK</code> response. For more information, see
 * Resolve the Error 200 response when copying objects to Amazon S3. The <code>200
 * OK</code> status code means the copy was accepted, but it doesn't mean the copy
@@ -1635,7 +1634,7 @@ namespace Aws
 * <p> <code>s3:DeleteObject</code> - To delete an object from a bucket,
 * you must always specify the <code>s3:DeleteObject</code> permission.</p>
 * </li> <li> <p> <code>s3:DeleteObjectVersion</code> - To delete a specific
- * version of an object from a versiong-enabled bucket, you must specify the
+ * version of an object from a versioning-enabled bucket, you must specify the
 * <code>s3:DeleteObjectVersion</code> permission.</p> </li> </ul>
 * required to use the multipart upload API, see
- * Multipart Upload and Permissions in the Amazon S3 User Guide.</p> <p>
- * <b>Directory bucket permissions</b> - You must have permissions in a bucket
- * policy or an IAM identity-based policy based on the source and destination
- * bucket types in an <code>UploadPartCopy</code> operation.</p> <ul> <li> <p>If
- * the source object that you want to copy is in a directory bucket, you must have
- * the <code>s3express:CreateSession</code> permission in the
- * <code>Action</code> element of a policy to read the object . By default, the
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions">Multipart
+ * upload API and permissions</a> in the Amazon S3 User Guide.</p> <p>
+ * <b>Directory bucket permissions</b> - You must have permissions in a
+ * bucket policy or an IAM identity-based policy based on the source and
+ * destination bucket types in an <code>UploadPartCopy</code> operation.</p>
+ * <ul> <li> <p>If the source object that you want to copy is in a directory bucket, you
+ * must have the <code>s3express:CreateSession</code> permission in the
+ * <code>Action</code> element of a policy to read the object. By default, the
 * session is in the <code>ReadWrite</code> mode. If you want to restrict the
 * access, you can explicitly set the <code>s3express:SessionMode</code> condition
 * key to <code>ReadOnly</code> on the copy source bucket.</p> </li> <li> <p>If the

diff --git a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/CreateSessionResult.h b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/CreateSessionResult.h
index 03924fbbf52..cf1c35aba83 100644
--- a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/CreateSessionResult.h
+++ b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/CreateSessionResult.h
@@ -35,7 +35,7 @@ namespace Model
   ///@{
   /**
- * <p>The established temporary security credentials for the created session..</p>
+ * <p>The established temporary security credentials for the created session.</p>
   */
  inline const SessionCredentials& GetCredentials() const{ return m_credentials; }
  inline void SetCredentials(const SessionCredentials& value) { m_credentials = value; }

diff --git a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h
index c70c3545326..a982e16c567 100644
--- a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h
+++ b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h
@@ -207,6 +207,88 @@ namespace Model
     inline HeadObjectRequest& WithRange(const char* value) { SetRange(value); return *this;}
     ///@}

+    ///@{
+    /**
+     * <p>Sets the <code>Cache-Control</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseCacheControl() const{ return m_responseCacheControl; }
+    inline bool ResponseCacheControlHasBeenSet() const { return m_responseCacheControlHasBeenSet; }
+    inline void SetResponseCacheControl(const Aws::String& value) { m_responseCacheControlHasBeenSet = true; m_responseCacheControl = value; }
+    inline void SetResponseCacheControl(Aws::String&& value) { m_responseCacheControlHasBeenSet = true; m_responseCacheControl = std::move(value); }
+    inline void SetResponseCacheControl(const char* value) { m_responseCacheControlHasBeenSet = true; m_responseCacheControl.assign(value); }
+    inline HeadObjectRequest& WithResponseCacheControl(const Aws::String& value) { SetResponseCacheControl(value); return *this;}
+    inline HeadObjectRequest& WithResponseCacheControl(Aws::String&& value) { SetResponseCacheControl(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseCacheControl(const char* value) { SetResponseCacheControl(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Content-Disposition</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseContentDisposition() const{ return m_responseContentDisposition; }
+    inline bool ResponseContentDispositionHasBeenSet() const { return m_responseContentDispositionHasBeenSet; }
+    inline void SetResponseContentDisposition(const Aws::String& value) { m_responseContentDispositionHasBeenSet = true; m_responseContentDisposition = value; }
+    inline void SetResponseContentDisposition(Aws::String&& value) { m_responseContentDispositionHasBeenSet = true; m_responseContentDisposition = std::move(value); }
+    inline void SetResponseContentDisposition(const char* value) { m_responseContentDispositionHasBeenSet = true; m_responseContentDisposition.assign(value); }
+    inline HeadObjectRequest& WithResponseContentDisposition(const Aws::String& value) { SetResponseContentDisposition(value); return *this;}
+    inline HeadObjectRequest& WithResponseContentDisposition(Aws::String&& value) { SetResponseContentDisposition(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseContentDisposition(const char* value) { SetResponseContentDisposition(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Content-Encoding</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseContentEncoding() const{ return m_responseContentEncoding; }
+    inline bool ResponseContentEncodingHasBeenSet() const { return m_responseContentEncodingHasBeenSet; }
+    inline void SetResponseContentEncoding(const Aws::String& value) { m_responseContentEncodingHasBeenSet = true; m_responseContentEncoding = value; }
+    inline void SetResponseContentEncoding(Aws::String&& value) { m_responseContentEncodingHasBeenSet = true; m_responseContentEncoding = std::move(value); }
+    inline void SetResponseContentEncoding(const char* value) { m_responseContentEncodingHasBeenSet = true; m_responseContentEncoding.assign(value); }
+    inline HeadObjectRequest& WithResponseContentEncoding(const Aws::String& value) { SetResponseContentEncoding(value); return *this;}
+    inline HeadObjectRequest& WithResponseContentEncoding(Aws::String&& value) { SetResponseContentEncoding(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseContentEncoding(const char* value) { SetResponseContentEncoding(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Content-Language</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseContentLanguage() const{ return m_responseContentLanguage; }
+    inline bool ResponseContentLanguageHasBeenSet() const { return m_responseContentLanguageHasBeenSet; }
+    inline void SetResponseContentLanguage(const Aws::String& value) { m_responseContentLanguageHasBeenSet = true; m_responseContentLanguage = value; }
+    inline void SetResponseContentLanguage(Aws::String&& value) { m_responseContentLanguageHasBeenSet = true; m_responseContentLanguage = std::move(value); }
+    inline void SetResponseContentLanguage(const char* value) { m_responseContentLanguageHasBeenSet = true; m_responseContentLanguage.assign(value); }
+    inline HeadObjectRequest& WithResponseContentLanguage(const Aws::String& value) { SetResponseContentLanguage(value); return *this;}
+    inline HeadObjectRequest& WithResponseContentLanguage(Aws::String&& value) { SetResponseContentLanguage(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseContentLanguage(const char* value) { SetResponseContentLanguage(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Content-Type</code> header of the response.</p>
+     */
+    inline const Aws::String& GetResponseContentType() const{ return m_responseContentType; }
+    inline bool ResponseContentTypeHasBeenSet() const { return m_responseContentTypeHasBeenSet; }
+    inline void SetResponseContentType(const Aws::String& value) { m_responseContentTypeHasBeenSet = true; m_responseContentType = value; }
+    inline void SetResponseContentType(Aws::String&& value) { m_responseContentTypeHasBeenSet = true; m_responseContentType = std::move(value); }
+    inline void SetResponseContentType(const char* value) { m_responseContentTypeHasBeenSet = true; m_responseContentType.assign(value); }
+    inline HeadObjectRequest& WithResponseContentType(const Aws::String& value) { SetResponseContentType(value); return *this;}
+    inline HeadObjectRequest& WithResponseContentType(Aws::String&& value) { SetResponseContentType(std::move(value)); return *this;}
+    inline HeadObjectRequest& WithResponseContentType(const char* value) { SetResponseContentType(value); return *this;}
+    ///@}
+
+    ///@{
+    /**
+     * <p>Sets the <code>Expires</code> header of the response.</p>
+     */
+    inline const Aws::Utils::DateTime& GetResponseExpires() const{ return m_responseExpires; }
+    inline bool ResponseExpiresHasBeenSet() const { return m_responseExpiresHasBeenSet; }
+    inline void SetResponseExpires(const Aws::Utils::DateTime& value) { m_responseExpiresHasBeenSet = true; m_responseExpires = value; }
+    inline void SetResponseExpires(Aws::Utils::DateTime&& value) { m_responseExpiresHasBeenSet = true; m_responseExpires = std::move(value); }
+    inline HeadObjectRequest& WithResponseExpires(const Aws::Utils::DateTime& value) { SetResponseExpires(value); return *this;}
+    inline HeadObjectRequest& WithResponseExpires(Aws::Utils::DateTime&& value) { SetResponseExpires(std::move(value)); return *this;}
+    ///@}
+
 * <p>Version ID used to reference a specific version of the object.</p>

@@ -369,6 +451,24 @@ namespace Model
     Aws::String m_range;
     bool m_rangeHasBeenSet = false;

+    Aws::String m_responseCacheControl;
+    bool m_responseCacheControlHasBeenSet = false;
+
+    Aws::String m_responseContentDisposition;
+    bool m_responseContentDispositionHasBeenSet = false;
+
+    Aws::String m_responseContentEncoding;
+    bool m_responseContentEncodingHasBeenSet = false;
+
+    Aws::String m_responseContentLanguage;
+    bool m_responseContentLanguageHasBeenSet = false;
+
+    Aws::String m_responseContentType;
+    bool m_responseContentTypeHasBeenSet = false;
+
+    Aws::Utils::DateTime m_responseExpires;
+    bool m_responseExpiresHasBeenSet = false;
+
     Aws::String m_versionId;
     bool m_versionIdHasBeenSet = false;

diff --git a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h
index 47edc83ca93..f0d544c1899 100644
--- a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h
+++ b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h
@@ -40,12 +40,12 @@ namespace Model
   ///@{
   /**
 * <p>A suffix that is appended to a request that is for a directory on the website
- * endpoint (for example,if the suffix is index.html and you make a request to
- * samplebucket/images/ the data that is returned will be for the object with the
- * key name images/index.html) The suffix must not be empty and must not include a
- * slash character.</p> <p>Replacement must be made for object keys
- * containing special characters (such as carriage returns) when using XML
- * requests. For more information, see
+ * endpoint. (For example, if the suffix is <code>index.html</code> and you make a
+ * request to <code>samplebucket/images/</code>, the data that is returned will be
+ * for the object with the key name <code>images/index.html</code>.) The suffix
+ * must not be empty and must not include a slash character.</p> <p>Replacement
+ * must be made for object keys containing special characters (such as carriage
+ * returns) when using XML requests. For more information, see
 * XML related object key constraints.</p>
   */
diff --git a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h
index 77632fdde6d..980bba4e82a 100644
--- a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h
+++ b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h
@@ -175,8 +175,8 @@ namespace Model
   /**
 * <p>Encoding type used by Amazon S3 to encode object keys in the response. If
 * using <code>url</code>, non-ASCII characters used in an object's key name will
- * be URL encoded. For example, the object test_file(3).png will appear as
- * test_file%283%29.png.</p>
+ * be URL-encoded. For example, the object <code>test_file(3).png</code> will
+ * appear as <code>test_file%283%29.png</code>.</p>
   */
inline const EncodingType& GetEncodingType() const{ return m_encodingType; }
inline void SetEncodingType(const EncodingType& value) { m_encodingType = value; }
diff --git a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h
index 900a589de2f..420bd419ff4 100644
--- a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h
+++ b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h
@@ -118,8 +118,8 @@ namespace Model
/**
 * <p>Encoding type used by Amazon S3 to encode object keys in the response. If
 * using <code>url</code>, non-ASCII characters used in an object's key name will
- * be URL encoded. For example, the object test_file(3).png will appear as
- * test_file%283%29.png.</p>
+ * be URL-encoded. For example, the object <code>test_file(3).png</code> will
+ * appear as <code>test_file%283%29.png</code>.</p>
inline const EncodingType& GetEncodingType() const{ return m_encodingType; }
inline bool EncodingTypeHasBeenSet() const { return m_encodingTypeHasBeenSet; }
diff --git a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h
index 058ab4d8ef5..9d4da840229 100644
--- a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h
+++ b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h
@@ -56,9 +56,9 @@ namespace Model
///@{
/**
- * <p>Specifies how many newer noncurrent versions must exist before Amazon S3 can
- * perform the associated action on a given version. If there are this many more
- * recent noncurrent versions, Amazon S3 will take the associated action. For more
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. You can specify
+ * up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any
+ * additional noncurrent versions beyond the specified number to retain. For more
 * information about noncurrent versions, see Lifecycle
 * configuration elements in the Amazon S3 User Guide.</p>
   */

diff --git a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h
index cf4320548cc..92d3fd47e03 100644
--- a/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h
+++ b/generated/src/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h
@@ -75,10 +75,11 @@ namespace Model
   ///@{
   /**
- * <p>Specifies how many newer noncurrent versions must exist before Amazon S3 can
- * perform the associated action on a given version. If there are this many more
- * recent noncurrent versions, Amazon S3 will take the associated action. For more
- * information about noncurrent versions, see Lifecycle
- * configuration elements in the Amazon S3 User Guide.</p>
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain in the same
+ * storage class before transitioning objects. You can specify up to 100 noncurrent
+ * versions to retain. Amazon S3 will transition any additional noncurrent versions
+ * beyond the specified number to retain. For more information about noncurrent
+ * versions, see Lifecycle
+ * configuration elements in the Amazon S3 User Guide.</p>
   */
diff --git a/generated/src/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp b/generated/src/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp
index 697e04670a2..412d7497585 100644
--- a/generated/src/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp
+++ b/generated/src/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp
@@ -24,6 +24,12 @@ HeadObjectRequest::HeadObjectRequest() :
     m_ifUnmodifiedSinceHasBeenSet(false),
     m_keyHasBeenSet(false),
     m_rangeHasBeenSet(false),
+    m_responseCacheControlHasBeenSet(false),
+    m_responseContentDispositionHasBeenSet(false),
+    m_responseContentEncodingHasBeenSet(false),
+    m_responseContentLanguageHasBeenSet(false),
+    m_responseContentTypeHasBeenSet(false),
+    m_responseExpiresHasBeenSet(false),
     m_versionIdHasBeenSet(false),
     m_sSECustomerAlgorithmHasBeenSet(false),
     m_sSECustomerKeyHasBeenSet(false),
@@ -47,6 +53,48 @@ Aws::String HeadObjectRequest::SerializePayload() const
 void HeadObjectRequest::AddQueryStringParameters(URI& uri) const
 {
     Aws::StringStream ss;
+    if(m_responseCacheControlHasBeenSet)
+    {
+      ss << m_responseCacheControl;
+      uri.AddQueryStringParameter("response-cache-control", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseContentDispositionHasBeenSet)
+    {
+      ss << m_responseContentDisposition;
+      uri.AddQueryStringParameter("response-content-disposition", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseContentEncodingHasBeenSet)
+    {
+      ss << m_responseContentEncoding;
+      uri.AddQueryStringParameter("response-content-encoding", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseContentLanguageHasBeenSet)
+    {
+      ss << m_responseContentLanguage;
+      uri.AddQueryStringParameter("response-content-language", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseContentTypeHasBeenSet)
+    {
+      ss << m_responseContentType;
+      uri.AddQueryStringParameter("response-content-type", ss.str());
+      ss.str("");
+    }
+
+    if(m_responseExpiresHasBeenSet)
+    {
+      ss << m_responseExpires.ToGmtString(Aws::Utils::DateFormat::RFC822);
+      uri.AddQueryStringParameter("response-expires", ss.str());
+      ss.str("");
+    }
+
     if(m_versionIdHasBeenSet)
     {
       ss << m_versionId;

diff --git a/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h b/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h
index aa1a36f4563..b3973d00c69 100644
--- a/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h
+++ b/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h
@@ -4,7 +4,7 @@
  */
 #pragma once
-#define AWS_SDK_VERSION_STRING "1.11.360"
+#define AWS_SDK_VERSION_STRING "1.11.361"
 #define AWS_SDK_VERSION_MAJOR 1
 #define AWS_SDK_VERSION_MINOR 11
-#define AWS_SDK_VERSION_PATCH 360
+#define AWS_SDK_VERSION_PATCH 361

diff --git a/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json b/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json
index 98a758ff460..62feb5aa9ca 100644
--- a/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json
+++ b/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json
@@ -9685,7 +9685,7 @@
       },
       "State":{
         "shape":"ByoipCidrState",
", + "documentation":"The state of the address range.
advertised
: The address range is being advertised to the internet by Amazon Web Services.
deprovisioned
: The address range is deprovisioned.
failed-deprovision
: The request to deprovision the address range was unsuccessful. Ensure that all EIPs from the range have been deallocated and try again.
failed-provision
: The request to provision the address range was unsuccessful.
pending-deprovision
: You’ve submitted a request to deprovision an address range and it's pending.
pending-provision
: You’ve submitted a request to provision an address range and it's pending.
provisioned
: The address range is provisioned and can be advertised. The range is not currently advertised.
provisioned-not-publicly-advertisable
: The address range is provisioned and cannot be advertised.
Indicates whether your client's IP address is preserved as the source. The value is true
or false
.
If true
, your client's IP address is used when you connect to a resource.
If false
, the elastic network interface IP address is used when you connect to a resource.
Default: true
Indicates whether the client IP address is preserved as the source. The following are the possible values.
true
- Use the client IP address as the source.
false
- Use the network interface IP address as the source.
Default: false
The Amazon Resource Name (ARN) of the Outpost.
" + "documentation":"The Amazon Resource Name (ARN) of the Outpost on which to create the volume.
If you intend to use a volume with an instance running on an outpost, then you must create the volume on the same outpost as the instance. You can't use a volume created in an Amazon Web Services Region with an instance on an Amazon Web Services outpost, or the other way around.
" }, "Size":{ "shape":"Integer", @@ -25005,7 +25005,7 @@ }, "VolumeIds":{ "shape":"VolumeIdStringList", - "documentation":"The volume IDs.
", + "documentation":"The volume IDs. If not specified, then all volumes are included in the response.
", "locationName":"VolumeId" }, "DryRun":{ @@ -32909,6 +32909,7 @@ "HostTenancy":{ "type":"string", "enum":[ + "default", "dedicated", "host" ] @@ -35944,7 +35945,7 @@ }, "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice":{ "shape":"Integer", - "documentation":"[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999
.
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999
.
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999
.
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999
.
The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.
You must specify VCpuCount
and MemoryMiB
. All other attributes are optional. Any unspecified optional attribute is set to its default.
When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.
To limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:
AllowedInstanceTypes
- The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.
ExcludedInstanceTypes
- The instance types to exclude from the list, even if they match your specified attributes.
If you specify InstanceRequirements
, you can't specify InstanceType
.
Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the launch instance wizard, or with the RunInstances API or AWS::EC2::Instance Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements
.
For more information, see Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide.
" @@ -42558,7 +42559,7 @@ }, "UserData":{ "shape":"BlobAttributeValue", - "documentation":"Changes the instance's user data to the specified value. If you are using an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text.
", + "documentation":"Changes the instance's user data to the specified value. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.
", "locationName":"userData" }, "Value":{ @@ -51952,7 +51953,7 @@ }, "UserData":{ "shape":"RunInstancesUserData", - "documentation":"The user data script to make available to the instance. For more information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User Guide. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.
" + "documentation":"The user data to make available to the instance. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.
" }, "AdditionalInfo":{ "shape":"String", diff --git a/tools/code-generation/api-descriptions/fms-2018-01-01.normal.json b/tools/code-generation/api-descriptions/fms-2018-01-01.normal.json index 043bc8da021..a2dc2137e6c 100644 --- a/tools/code-generation/api-descriptions/fms-2018-01-01.normal.json +++ b/tools/code-generation/api-descriptions/fms-2018-01-01.normal.json @@ -11,7 +11,8 @@ "serviceId":"FMS", "signatureVersion":"v4", "targetPrefix":"AWSFMS_20180101", - "uid":"fms-2018-01-01" + "uid":"fms-2018-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAdminAccount":{ @@ -2548,7 +2549,7 @@ }, "ManagedServiceData":{ "type":"string", - "max":10000, + "max":30000, "min":1, "pattern":"^((?!\\\\[nr]).)+" }, diff --git a/tools/code-generation/api-descriptions/s3-2006-03-01.normal.json b/tools/code-generation/api-descriptions/s3-2006-03-01.normal.json index 5e39e04c2e6..ecc0aa679ca 100644 --- a/tools/code-generation/api-descriptions/s3-2006-03-01.normal.json +++ b/tools/code-generation/api-descriptions/s3-2006-03-01.normal.json @@ -11,7 +11,8 @@ "serviceFullName":"Amazon Simple Storage Service", "serviceId":"S3", "signatureVersion":"s3", - "uid":"s3-2006-03-01" + "uid":"s3-2006-03-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AbortMultipartUpload":{ @@ -52,7 +53,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds. to keep the connection alive while we copy the data.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response doesn't contain the Content-Length header. You always need to read the entire response body to check whether the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-Region copy, you might encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete; in that case, Amazon S3 might cancel the copy and you might receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
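Because the SDK surfaces an error embedded in a 200 OK body through the normal error path, a single outcome check covers both failure modes. A hedged sketch of that pattern with the AWS SDK for C++ follows; the helper name and its arguments are invented for illustration.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/CopyObjectRequest.h>
    #include <iostream>

    // Sketch: one IsSuccess() check covers both a standard error response and
    // an error embedded in a 200 OK body, because the SDK parses the response
    // body and reports an embedded error through the normal error path.
    bool CopyWithErrorCheck(const Aws::S3::S3Client& client,
                            const Aws::String& source,      // "bucket/key"
                            const Aws::String& destBucket,
                            const Aws::String& destKey)
    {
        Aws::S3::Model::CopyObjectRequest request;
        request.SetCopySource(source);
        request.SetBucket(destBucket);
        request.SetKey(destKey);

        auto outcome = client.CopyObject(request);
        if (!outcome.IsSuccess()) {
            // Transient errors (for example, throttling) are retried
            // automatically per the client's retry configuration before the
            // failure surfaces here.
            const auto& err = outcome.GetError();
            std::cerr << "CopyObject failed: " << err.GetExceptionName()
                      << " - " << err.GetMessage() << std::endl;
            return false;
        }
        return true;
    }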
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different Region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to CopyObject:
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode, in which the response includes the result of the deletion of each key in your request. In quiet mode, the response includes only keys where the delete operation encountered an error. For a successful deletion in quiet mode, the operation does not return any information about the delete in the response body.
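As a sketch of the quiet-mode behavior described above, the following AWS SDK for C++ program deletes a small batch of keys and prints only the per-key failures. The bucket and key names are placeholders, and the SDK computes the required Content-MD5/checksum header automatically.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/DeleteObjectsRequest.h>
    #include <aws/s3/model/Delete.h>
    #include <aws/s3/model/ObjectIdentifier.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            // Build the delete list (up to 1000 keys per request).
            Aws::S3::Model::Delete del;
            for (const auto* key : {"logs/a.log", "logs/b.log", "logs/c.log"}) {
                del.AddObjects(Aws::S3::Model::ObjectIdentifier().WithKey(key));
            }
            del.SetQuiet(true); // quiet mode: only failed deletes are reported

            Aws::S3::Model::DeleteObjectsRequest request;
            request.SetBucket("amzn-s3-demo-bucket");
            request.SetDelete(del);

            auto outcome = client.DeleteObjects(request);
            if (outcome.IsSuccess()) {
                // In quiet mode, GetErrors() lists only the keys that failed.
                for (const auto& e : outcome.GetResult().GetErrors()) {
                    std::cerr << e.GetKey() << ": " << e.GetMessage() << std::endl;
                }
            } else {
                std::cerr << "DeleteObjects failed: "
                          << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }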
When performing this action on an MFA delete enabled bucket that attempts to delete any versioned objects, you must include an MFA token. If you don't provide one, the entire request will fail, even if there are non-versioned objects that you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers.
s3:DeleteObject - To delete an object from a bucket, you must always specify the s3:DeleteObject permission.
s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to DeleteObjects:
Uploads a part by copying data from an existing object as the data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
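The following AWS SDK for C++ sketch ties these pieces together: it initiates a multipart upload, copies the source in byte ranges with UploadPartCopy, and completes the upload with the collected part numbers and ETags. The bucket names, object size, and part size are assumptions for illustration; a production version would also abort the multipart upload on failure.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/CreateMultipartUploadRequest.h>
    #include <aws/s3/model/UploadPartCopyRequest.h>
    #include <aws/s3/model/CompleteMultipartUploadRequest.h>
    #include <aws/s3/model/CompletedMultipartUpload.h>
    #include <aws/s3/model/CompletedPart.h>
    #include <algorithm>
    #include <iostream>
    #include <sstream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;
            const Aws::String destBucket = "amzn-s3-demo-destination-bucket";
            const Aws::String destKey = "backup/big-object";
            const long long objectSize = 10LL * 1024 * 1024 * 1024; // assumed known
            const long long partSize = 512LL * 1024 * 1024;         // 512 MiB parts

            // 1. Initiate the multipart upload to get an upload ID.
            Aws::S3::Model::CreateMultipartUploadRequest createReq;
            createReq.SetBucket(destBucket);
            createReq.SetKey(destKey);
            auto createOutcome = client.CreateMultipartUpload(createReq);
            if (!createOutcome.IsSuccess()) { return 1; }
            const Aws::String uploadId = createOutcome.GetResult().GetUploadId();

            // 2. Copy the source in byte ranges, one UploadPartCopy per part.
            Aws::S3::Model::CompletedMultipartUpload completed;
            int partNumber = 1;
            for (long long offset = 0; offset < objectSize;
                 offset += partSize, ++partNumber) {
                const long long last = std::min(offset + partSize, objectSize) - 1;
                std::ostringstream range;
                range << "bytes=" << offset << "-" << last;

                Aws::S3::Model::UploadPartCopyRequest partReq;
                partReq.SetBucket(destBucket);
                partReq.SetKey(destKey);
                partReq.SetUploadId(uploadId);
                partReq.SetPartNumber(partNumber);
                partReq.SetCopySource("amzn-s3-demo-source-bucket/backup/big-object");
                partReq.SetCopySourceRange(range.str().c_str());

                auto partOutcome = client.UploadPartCopy(partReq);
                if (!partOutcome.IsSuccess()) { return 1; } // abort in real code

                completed.AddParts(Aws::S3::Model::CompletedPart()
                    .WithPartNumber(partNumber)
                    .WithETag(partOutcome.GetResult().GetCopyPartResult().GetETag()));
            }

            // 3. Complete the upload with the collected parts.
            Aws::S3::Model::CompleteMultipartUploadRequest completeReq;
            completeReq.SetBucket(destBucket);
            completeReq.SetKey(destKey);
            completeReq.SetUploadId(uploadId);
            completeReq.SetMultipartUpload(completed);
            auto completeOutcome = client.CompleteMultipartUpload(completeReq);
            std::cout << (completeOutcome.IsSuccess() ? "copy complete"
                                                      : "copy failed")
                      << std::endl;
        }
        Aws::ShutdownAPI(options);
        return 0;
    }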
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.
The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.
You must have READ access to the source object and WRITE access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to UploadPartCopy:
-          "documentation":"The established temporary security credentials for the created session..",
+          "documentation":"The established temporary security credentials for the created session.",
           "locationName":"Credentials"
         }
       }
@@ -5785,6 +5787,42 @@
           "location":"header",
           "locationName":"Range"
         },
+        "ResponseCacheControl":{
+          "shape":"ResponseCacheControl",
+          "documentation":"Sets the Cache-Control header of the response."
+        },
+        "ResponseContentDisposition":{
+          "shape":"ResponseContentDisposition",
+          "documentation":"Sets the Content-Disposition header of the response."
+        },
+        "ResponseContentEncoding":{
+          "shape":"ResponseContentEncoding",
+          "documentation":"Sets the Content-Encoding header of the response."
+        },
+        "ResponseContentLanguage":{
+          "shape":"ResponseContentLanguage",
+          "documentation":"Sets the Content-Language header of the response."
+        },
+        "ResponseContentType":{
+          "shape":"ResponseContentType",
+          "documentation":"Sets the Content-Type header of the response."
+        },
+        "ResponseExpires":{
+          "shape":"ResponseExpires",
+          "documentation":"Sets the Expires header of the response."
+        },
        "VersionId":{
          "shape":"ObjectVersionId",
          "documentation":"Version ID used to reference a specific version of the object. For directory buckets in this API operation, only the null value of the version ID is supported."
        },
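In the AWS SDK for C++ these response header override members surface as setters on the request types. A minimal sketch with GetObjectRequest (which has long exposed the same members) follows; the bucket and key names are placeholders. The overrides apply to the single response only and do not modify the stored object metadata.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/GetObjectRequest.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::GetObjectRequest request;
            request.SetBucket("amzn-s3-demo-bucket");
            request.SetKey("reports/2024/summary.csv");
            // Override the headers returned with the object for this request
            // only; the stored object metadata is not modified.
            request.SetResponseContentType("text/csv");
            request.SetResponseContentDisposition("attachment; filename=\"summary.csv\"");
            request.SetResponseCacheControl("no-cache");

            auto outcome = client.GetObject(request);
            if (outcome.IsSuccess()) {
                std::cout << "Content-Type returned: "
                          << outcome.GetResult().GetContentType() << std::endl;
            } else {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }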
-          "documentation":"A suffix that is appended to a request that is for a directory on the website endpoint (for example,if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character. Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.",
+          "documentation":"A suffix that is appended to a request that is for a directory on the website endpoint. (For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html.) The suffix must not be empty and must not include a slash character. Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints."
        }
      },
      "documentation":"Container for the Suffix element."
      "documentation":"Encoding type used by Amazon S3 to encode object keys in the response. If using url, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png."
-          "documentation":"Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide."
+          "documentation":"Specifies how many noncurrent versions Amazon S3 will retain. You can specify up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide."
        }
      },
      "documentation":"Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime."
@@ -7514,7 +7552,7 @@
      },
      "NewerNoncurrentVersions":{
        "shape":"VersionCount",
-        "documentation":"Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide."
+        "documentation":"Specifies how many noncurrent versions Amazon S3 will retain in the same storage class before transitioning objects. You can specify up to 100 noncurrent versions to retain. Amazon S3 will transition any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide."
      }
    },
    "documentation":"Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's lifetime."
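A short AWS SDK for C++ sketch of the retained-version behavior described in the new documentation strings: the rule below keeps the five most recent noncurrent versions and expires older ones 30 days after they become noncurrent. The bucket name, rule ID, and counts are placeholder assumptions.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketLifecycleConfigurationRequest.h>
    #include <aws/s3/model/BucketLifecycleConfiguration.h>
    #include <aws/s3/model/LifecycleRule.h>
    #include <aws/s3/model/LifecycleRuleFilter.h>
    #include <aws/s3/model/NoncurrentVersionExpiration.h>
    #include <aws/s3/model/ExpirationStatus.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            // Keep the 5 most recent noncurrent versions; delete older ones
            // 30 days after they become noncurrent.
            Aws::S3::Model::NoncurrentVersionExpiration expiration;
            expiration.SetNewerNoncurrentVersions(5);
            expiration.SetNoncurrentDays(30);

            Aws::S3::Model::LifecycleRule rule;
            rule.SetID("retain-5-noncurrent-versions");
            rule.SetStatus(Aws::S3::Model::ExpirationStatus::Enabled);
            rule.SetFilter(Aws::S3::Model::LifecycleRuleFilter().WithPrefix(""));
            rule.SetNoncurrentVersionExpiration(expiration);

            Aws::S3::Model::BucketLifecycleConfiguration config;
            config.AddRules(rule);

            Aws::S3::Model::PutBucketLifecycleConfigurationRequest request;
            request.SetBucket("amzn-s3-demo-bucket");
            request.SetLifecycleConfiguration(config);

            auto outcome = client.PutBucketLifecycleConfiguration(request);
            if (outcome.IsSuccess()) {
                std::cout << "lifecycle configured" << std::endl;
            } else {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }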