For more information, please refer to
[Amazon's documentation for DELETE](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html).
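For example, a minimal sketch (assuming `$client` is an already constructed `Amazon::S3::Thin` instance; bucket and key names are placeholders):

```perl
# Delete a single object; S3 replies 204 No Content on success.
my $response = $client->delete_object('my-bucket', 'path/to/file.txt');
warn 'delete failed: ' . $response->status_line unless $response->is_success;
```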
## copy\_object( $src\_bucket, $src\_key, $dst\_bucket, $dst\_key \[, $headers\] )
**Arguments**: a list with source (bucket, key) and destination (bucket, key), and a hashref with extra header information (**optional**).
**Returns**: an [HTTP::Response](https://metacpan.org/pod/HTTP%3A%3AResponse) object for the request.
This method is a variation of the PUT operation as described by
Amazon's S3 API. It creates a copy of an object that is already stored
in Amazon S3. This "PUT copy" operation is the same as performing a GET
from the old bucket/key and then a PUT to the new bucket/key.
Note that the COPY request may return an error response inside a 200 OK; this method
detects such a response and rewrites the status code to 500.
For more information, please refer to
[Amazon's documentation for COPY](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html).
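For example, a minimal sketch (assuming `$client` is an already constructed `Amazon::S3::Thin` instance; bucket, key, and ACL values are placeholders):

```perl
# Copy an object within or across buckets; the optional hashref adds extra
# headers to the PUT-copy request (x-amz-acl is just an example).
my $response = $client->copy_object(
    'src-bucket', 'dir/original.txt',
    'dst-bucket', 'dir/copy.txt',
    { 'x-amz-acl' => 'public-read' },
);
die 'copy failed: ' . $response->status_line unless $response->is_success;
```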
## put\_object( $bucket, $key, $content \[, $headers\] )
**Arguments**:
- 1. bucket - a string with the destination bucket
- 2. key - a string with the destination key
- 3. content - a string with the content to be uploaded
- 4. headers (**optional**) - hashref with extra header information
**Returns**: an [HTTP::Response](https://metacpan.org/pod/HTTP%3A%3AResponse) object for the request.
The PUT operation adds an object to a bucket. Amazon S3 never adds partial
objects; if you receive a success response, Amazon S3 added the entire
object to the bucket.
For more information, please refer to
[Amazon's documentation for PUT](http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html).
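For example, a minimal sketch (assuming `$client` is an already constructed `Amazon::S3::Thin` instance; bucket, key, and header values are placeholders):

```perl
# Upload a string as an object; extra headers such as Content-Type are optional.
my $response = $client->put_object(
    'my-bucket', 'dir/hello.txt',
    'hello amazon s3',
    { 'Content-Type' => 'text/plain' },
);
print "stored\n" if $response->is_success;
```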
## delete\_multiple\_objects( $bucket, @keys )
**Arguments**: a string with the bucket name, and an array with all the keys
to be deleted.
**Returns**: an [HTTP::Response](https://metacpan.org/pod/HTTP%3A%3AResponse) object for the request.
The Multi-Object Delete operation enables you to delete multiple objects
(up to 1000) from a bucket using a single HTTP request. If you know the
lib/Amazon/S3/Thin.pm
return $signer_class->new($self->{credentials}, $self->{region});
}
}
sub _default_ua {
my $self = shift;
my $ua = LWP::UserAgent->new(
keep_alive => 10,
requests_redirectable => [qw(GET HEAD DELETE PUT)],
);
$ua->timeout(30);
$ua->env_proxy;
return $ua;
}
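# Note (illustrative sketch, not part of the original source): callers may
# supply their own user agent instead of this default one; the test suite
# passes a MockUA object through the constructor's "ua" option, e.g.
#
#   my $ua = LWP::UserAgent->new(timeout => 10, keep_alive => 10);
#   my $client = Amazon::S3::Thin->new({ %constructor_args, ua => $ua });
#
# where %constructor_args stands in for the usual credential/region options.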
# Accessors
sub secure {
my $self = shift;
lib/Amazon/S3/Thin.pm
sub delete_object {
my ($self, $bucket, $key) = @_;
my $request = $self->_compose_request('DELETE', $self->_resource($bucket, $key));
return $self->_send($request);
}
sub copy_object {
my ($self, $src_bucket, $src_key, $dst_bucket, $dst_key, $headers) = @_;
$headers ||= {};
$headers->{'x-amz-copy-source'} = $src_bucket . "/" . $src_key;
my $request = $self->_compose_request('PUT', $self->_resource($dst_bucket, $dst_key), $headers);
my $res = $self->_send($request);
# XXX: Since the COPY request might return an error response inside a 200 OK, we rewrite the status code to 500 for convenience
# ref http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
# ref https://github.com/boto/botocore/blob/4e9b4419ec018716ab1a3fe1587fbdc3cfef200e/botocore/handlers.py#L77-L120
if ($self->_looks_like_special_case_error($res)) {
$res->code(500);
}
return $res;
}
lib/Amazon/S3/Thin.pm
$content = _content_sub($$content);
}
else {
$headers->{'Content-Length'} ||= length $content;
}
if (ref($content)) {
# TODO
# I do not understand what it is :(
#
# return $self->_send_request_expect_nothing_probed('PUT',
# $self->_resource($bucket, $key), $headers, $content);
#
die "unable to handle reference";
}
else {
my $request = $self->_compose_request('PUT', $self->_resource($bucket, $key), $headers, $content);
return $self->_send($request);
}
}
sub list_objects {
my ($self, $bucket, $opt) = @_;
croak 'must specify bucket' unless $bucket;
$opt ||= {};
my $query_string;
lib/Amazon/S3/Thin.pm
my $content ;
if ($region eq "us-east-1") {
$content = "";
} else {
my $location_constraint = "<LocationConstraint>$region</LocationConstraint>";
$content = <<"EOT";
<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">$location_constraint</CreateBucketConfiguration>
EOT
}
my $request = $self->_compose_request('PUT', $self->_resource($bucket), $headers, $content);
return $self->_send($request);
}
sub delete_bucket {
my ($self, $bucket) = @_;
my $request = $self->_compose_request('DELETE', $self->_resource($bucket));
return $self->_send($request);
}
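# Example (illustrative sketch, not part of the original source): the
# bucket-level methods above take only the bucket name and return the raw
# HTTP::Response, e.g.
#
#   my $res = $client->delete_bucket('my-temporary-bucket');
#   warn $res->status_line unless $res->is_success;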
sub generate_presigned_post {
lib/Amazon/S3/Thin.pm
For more information, please refer to
L<< Amazon's documentation for DELETE|http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html >>.
=head2 copy_object( $src_bucket, $src_key, $dst_bucket, $dst_key [, $headers] )
B<Arguments>: a list with source (bucket, key) and destination (bucket, key), and a hashref with extra header information (B<optional>).
B<Returns>: an L<HTTP::Response> object for the request.
This method is a variation of the PUT operation as described by
Amazon's S3 API. It creates a copy of an object that is already stored
in Amazon S3. This "PUT copy" operation is the same as performing a GET
from the old bucket/key and then a PUT to the new bucket/key.
Note that the COPY request may return an error response inside a 200 OK; this method
detects such a response and rewrites the status code to 500.
For more information, please refer to
L<< Amazon's documentation for COPY|http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html >>.
=head2 put_object( $bucket, $key, $content [, $headers] )
B<Arguments>:
lib/Amazon/S3/Thin.pm
=item 2. key - a string with the destination key
=item 3. content - a string with the content to be uploaded
=item 4. headers (B<optional>) - hashref with extra header information
=back
B<Returns>: an L<HTTP::Response> object for the request.
The PUT operation adds an object to a bucket. Amazon S3 never adds partial
objects; if you receive a success response, Amazon S3 added the entire
object to the bucket.
For more information, please refer to
L<< Amazon's documentation for PUT|http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html >>.
=head2 delete_multiple_objects( $bucket, @keys )
B<Arguments>: a string with the bucket name, and an array with all the keys
to be deleted.
B<Returns>: an L<HTTP::Response> object for the request.
The Multi-Object Delete operation enables you to delete multiple objects
(up to 1000) from a bucket using a single HTTP request. If you know the
t/02_credentials_metadata.t
diag "IMDSv2";
my $ua = MockUA->new;
my $credentials = Amazon::S3::Thin::Credentials->from_metadata(+{
%$arg,
ua => $ua,
});
is_deeply $ua->requests, [
{
method => 'PUT',
uri => 'http://169.254.169.254/latest/api/token',
headers => { 'X-aws-ec2-metadata-token-ttl-seconds' => 90 },
},
{
method => 'GET',
uri => 'http://169.254.169.254/latest/meta-data/iam/security-credentials',
headers => { 'X-aws-ec2-metadata-token' => 'DUMMY-METADATA-TOKEN' },
},
{
method => 'GET',
t/02_credentials_metadata.t
sub get {
my ($self, $uri, %form) = @_;
$self->_request('GET', $uri, %form);
}
sub put {
my ($self, $uri, %form) = @_;
$self->_request('PUT', $uri, %form);
}
sub _request {
my ($self, $method, $uri, %form) = @_;
my $request = {
method => $method,
uri => $uri,
headers => { %{$self->{default_headers}}, %form },
};
t/02_signer_v2.t
use warnings;
use Amazon::S3::Thin::Credentials;
use Amazon::S3::Thin::Signer::V2;
use Test::More;
use HTTP::Headers;
use HTTP::Request;
# This test only calculates signatures; no HTTP communication is performed.
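# For reference, Signature Version 2 for S3 boils down to
#   base64( HMAC-SHA1( secret_access_key, string_to_sign ) )
# A minimal standalone sketch of that calculation (not part of this test file):
#
#   use Digest::HMAC_SHA1 qw(hmac_sha1);
#   use MIME::Base64 qw(encode_base64);
#
#   sub sign_v2 {
#       my ($secret, $string_to_sign) = @_;
#       return encode_base64(hmac_sha1($string_to_sign, $secret), '');
#   }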
{
diag "test PUT request";
my $secret = "secretfoobar";
my $verb = "PUT";
my $path = "example/file.txt";
my $hdr = HTTP::Headers->new;
$hdr->header("content-length", 15);
$hdr->header("date", 'Sun, 01 Mar 2015 15:11:25 GMT');
my $credentials = Amazon::S3::Thin::Credentials->new('', $secret);
my $signer = Amazon::S3::Thin::Signer::V2->new($credentials);
my $sig = $signer->calculate_signature($verb,$path,$hdr);
is $sig, "/WcvruHFtEoxcEMmdsfLJ6iZClw=";
t/02_signer_v2.t
is(
$signer->string_to_sign($verb,$path,$hdr),
$string_to_sign,
'string to sign'
);
my $sig = $signer->calculate_signature($verb, $path, $hdr);
is $sig, 'bWq2s1WEIj+Ydj0vQ697zp+IXMU=', "puppy test (GET)";
}
{
diag "test Amazon example object PUT";
my $verb = "PUT";
my $date = "Tue, 27 Mar 2007 21:15:45 +0000";
my $path = "johnsmith/photos/puppy.jpg";
my $content_type = "image/jpeg";
my $content_length = 94328;
my $string_to_sign = "$verb\n\n$content_type\n$date\n/$path";
my $signer = Amazon::S3::Thin::Signer::V2->new($credentials);
my $hdr = HTTP::Headers->new;
$hdr->header("Date", $date);
$hdr->header("Content-Type", $content_type);
$hdr->header("Content-Length", $content_length);
is(
$signer->string_to_sign($verb,$path,$hdr),
$string_to_sign,
'string to sign'
);
my $sig = $signer->calculate_signature($verb, $path, $hdr);
is $sig, 'MyyxeRY7whkBe+bq8fHCL/2kKUg=', "puppy test (PUT)";
}
{
diag "test Amazon example list";
my $verb = "GET";
my $date = "Tue, 27 Mar 2007 19:42:41 +0000";
my $path = "johnsmith/?prefix=photos&max-keys=50&marker=puppy";
my $user_agent = "Mozilla/5.0";
my $string_to_sign = "$verb\n\n\n$date\n/johnsmith/";
t/02_signer_v2.t
$string_to_sign,
'string to sign'
);
my $sig = $signer->calculate_signature($verb, $path, $hdr);
is $sig, 'lx3byBScXR6KzyMaifNkardMwNk=', "puppy delete (DELETE)";
}
{
diag "test Amazon example upload";
my $verb = "PUT";
my $date = "Tue, 27 Mar 2007 21:06:08 +0000";
# TODO: ports should be stripped from the path for signing
# my $path = "static.johnsmith.net:8080/db-backup.dat.gz";
my $path = "static.johnsmith.net/db-backup.dat.gz";
my $user_agent = "curl/7.15.5";
my $x_amz_acl = "public-read";
my $content_type = "application/x-download";
my $content_md5 = "4gJE4saaMU4BqNR0kLY+lw==";
my @x_amz_meta_reviewed_by = ('joe@johnsmith.net', 'jane@johnsmith.net');
my $x_amz_meta_filechecksum = '0x02661779';
my $x_amz_meta_checksum_algorithm = 'crc32';
my $content_disposition = "attachment; filename=database.dat";
my $content_encoding = "gzip";
my $content_length = 5913339;
my $string_to_sign = "PUT\n4gJE4saaMU4BqNR0kLY+lw==\napplication/x-download\nTue, 27 Mar 2007 21:06:08 +0000\nx-amz-acl:public-read\nx-amz-meta-checksumalgorithm:crc32\nx-amz-meta-filechecksum:0x02661779\nx-amz-meta-reviewedby:joe\@johnsmith.net,...
my $signer = Amazon::S3::Thin::Signer::V2->new($credentials);
my $hdr = HTTP::Headers->new;
$hdr->header("Date", $date);
$hdr->header("User-Agent", $user_agent);
$hdr->header("x-amz-acl", $x_amz_acl);
$hdr->header("content-type", $content_type);
$hdr->header("Content-MD5", $content_md5);
$hdr->header("X-Amz-Meta-ReviewedBy", join(',' => @x_amz_meta_reviewed_by));
$hdr->header("X-Amz-Meta-FileChecksum", $x_amz_meta_filechecksum);
t/02_signer_v2.t
$hdr->header("Content-Disposition", $content_disposition);
$hdr->header("Content-Encoding", $content_encoding);
$hdr->header("Content-Length", $content_length);
is(
$signer->string_to_sign($verb,$path,$hdr),
$string_to_sign,
'string to sign'
);
my $sig = $signer->calculate_signature($verb, $path, $hdr);
is $sig, 'ilyl83RwaSoYIEdixDQcA4OnAnc=', "puppy upload (PUT)";
}
{
diag "test Amazon example list buckets";
my $verb = "GET";
my $date = "Wed, 28 Mar 2007 01:29:59 +0000";
my $path = "";
my $string_to_sign = "$verb\n\n\n$date\n/$path";
t/03_request.t
my $res1 = $client->put_object($bucket, $key, $body);
my $res2 = $client->get_object($bucket, $key);
my $res3 = $client->head_object($bucket, $key);
my $req1 = $res1->request;
my $req2 = $res2->request;
my $req3 = $res3->request;
diag "test PUT request";
is $req1->method, "PUT";
is $req1->content, $body;
is $req1->uri, "http://s3.ap-north-east-1.amazonaws.com/tmpfoobar/dir/private.txt";
diag "test GET request";
is $req2->method, "GET";
is $req2->uri, "http://s3.ap-north-east-1.amazonaws.com/tmpfoobar/dir/private.txt";
diag "test HEAD request";
is $req3->method, "HEAD";
is $req3->uri, "http://s3.ap-north-east-1.amazonaws.com/tmpfoobar/dir/private.txt";
t/03_request.t
is $req5->uri, "http://s3.ap-north-east-1.amazonaws.com/tmpfoobar/?delete=";
is $req5->header('Content-MD5'), 'pjGVehBgNtca8xN21pLCCA==';
diag "test GET request with headers";
my $res6 = $client->get_object($bucket, $key, {"X-Test-Header" => "Foo"});
my $req6 = $res6->request;
is $req6->method, "GET";
is $req6->uri, "http://s3.ap-north-east-1.amazonaws.com/tmpfoobar/dir/private.txt";
is $req6->header("X-Test-Header"), "Foo";
diag "test PUT request (copy) with headers";
my $res7 = $client->copy_object($bucket, $key, $bucket, "copied.txt", {"x-amz-acl" => "public-read"});
my $req7 = $res7->request;
is $req7->method, "PUT";
is $req7->uri, "http://s3.ap-north-east-1.amazonaws.com/tmpfoobar/copied.txt";
is $req7->header("x-amz-copy-source"), "tmpfoobar/dir/private.txt";
is $req7->header("x-amz-acl"), "public-read";
done_testing;
package MockUA;
sub new {
my $class = shift;
t/06_request_virtual_host.t
my $res1 = $client->put_object($bucket, $key, $body);
my $res2 = $client->get_object($bucket, $key);
my $res3 = $client->head_object($bucket, $key);
my $req1 = $res1->request;
my $req2 = $res2->request;
my $req3 = $res3->request;
diag "test PUT request";
is $req1->method, "PUT";
is $req1->content, $body;
is $req1->uri, "http://tmpfoobar.s3.amazonaws.com/dir/private.txt";
diag "test GET request";
is $req2->method, "GET";
is $req2->uri, "http://tmpfoobar.s3.amazonaws.com/dir/private.txt";
diag "test HEAD request";
is $req3->method, "HEAD";
is $req3->uri, "http://tmpfoobar.s3.amazonaws.com/dir/private.txt";
t/06_request_virtual_host.t
is $req5->uri, "http://tmpfoobar.s3.amazonaws.com/?delete=";
is $req5->header('Content-MD5'), 'pjGVehBgNtca8xN21pLCCA==';
diag "test GET request with headers";
my $res6 = $client->get_object($bucket, $key, {"X-Test-Header" => "Foo"});
my $req6 = $res6->request;
is $req6->method, "GET";
is $req6->uri, "http://tmpfoobar.s3.amazonaws.com/dir/private.txt";
is $req6->header("X-Test-Header"), "Foo";
diag "test PUT request (copy) with headers";
my $res7 = $client->copy_object($bucket, $key, $bucket, "copied.txt", {"x-amz-acl" => "public-read"});
my $req7 = $res7->request;
is $req7->method, "PUT";
is $req7->uri, "http://tmpfoobar.s3.amazonaws.com/copied.txt";
is $req7->header("x-amz-copy-source"), "tmpfoobar/dir/private.txt";
is $req7->header("x-amz-acl"), "public-read";
done_testing;
package MockUA;
sub new {
my $class = shift;
t/07_copy_200_error.t
};
my $mock = MockUA->new;
$arg->{ua} = $mock;
my $client = Amazon::S3::Thin->new($arg);
my $bucket = "tmpfoobar";
my $key = "dir/private.txt";
diag "test PUT request (copy) ";
$mock->response(HTTP::Response->new(200));
my $res1 = $client->copy_object($bucket, $key, $bucket, "copied.txt");
is $res1->code, 500;
diag "test PUT request (copy) with headers";
$mock->response(HTTP::Response->new(200, <<'XML'));
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>InternalError</Code>
<Message>...</Message>
<Resource>...</Resource>
<RequestId>...</RequestId>
</Error>
XML
my $res2 = $client->copy_object($bucket, $key, $bucket, "copied.txt");
xt/90_functional.t
debug => $debug,
};
my $protocol = $use_https ? 'https' : 'http';
my $client = Amazon::S3::Thin->new($arg);
my $key = "dir/s3test.txt";
my $body = "hello amazon s3";
# 0. HEAD to check existence
# 1. DELETE
# 2. PUT to create
# 3. GET
# 4. PUT to update
# 5. DELETE
my $res;
my $req;
diag "DELETE request";
$res = $client->delete_object($bucket, $key);
$req = $res->request;
is $res->code, 204;
is $req->method, "DELETE";
is $req->content, '';
is $req->uri, "$protocol://$host/$bucket/dir/s3test.txt";
xt/90_functional.t
is $req->uri, "$protocol://$host/$bucket/dir/s3test.txt";
diag "GET request";
$res = $client->get_object($bucket, $key);
$req = $res->request;
ok !$res->is_success, "is not success";
is $res->code, 404;
is $req->method, "GET";
is $req->uri, "$protocol://$host/$bucket/dir/s3test.txt";
diag "PUT request";
$res = $client->put_object($bucket, $key, $body);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "PUT";
is $req->content, $body;
is $req->uri, "$protocol://$host/$bucket/dir/s3test.txt";
diag "HEAD request";
$res = $client->head_object($bucket, $key);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "HEAD";
is $req->content, '';
is $req->uri, "$protocol://$host/$bucket/dir/s3test.txt";
like $res->header("x-amz-request-id"), qr/.+/, "has proper headers";
diag "COPY request";
my $key2 = $key . "_copied";
$res = $client->copy_object($bucket, $key, $bucket, $key2);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "PUT";
diag "GET request";
$res = $client->get_object($bucket, $key2);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "GET";
is $req->uri, "$protocol://$host/$bucket/dir/s3test.txt_copied";
xt/93_session_token.t
use warnings;
use Test::More;
use Config::Tiny;
use File::Basename qw(basename);
use LWP::UserAgent;
use Amazon::S3::Thin;
=head1 HOW TO TEST
PROFILE=__PUT_HERE_YOUR_PROFILE__
ROLE_ARN=__PUT_HERE_YOUR_ROLE_ARN__
TEMP_SESSION=$(aws --profile ${PROFILE} sts assume-role --role-arn ${ROLE_ARN} --role-session-name s3thin-test-session)
export AWS_ACCESS_KEY_ID=$(echo ${TEMP_SESSION} | jq -r '.Credentials.AccessKeyId')
export AWS_SECRET_ACCESS_KEY=$(echo ${TEMP_SESSION} | jq -r '.Credentials.SecretAccessKey')
export AWS_SESSION_TOKEN=$(echo ${TEMP_SESSION} | jq -r '.Credentials.SessionToken')
=cut
if (!$ENV{EXTENDED_TESTING}) {
plan skip_all => 'Skip functional test because it would call S3 APIs and charge real money. $ENV{EXTENDED_TESTING} is not set.';
xt/94_virtual_host.t
virtual_host => 1,
};
my $protocol = $use_https ? 'https' : 'http';
my $client = Amazon::S3::Thin->new($arg);
my $key = "dir/s3test.txt";
my $body = "hello amazon s3";
# 0. HEAD to check existence
# 1. DELETE
# 2. PUT to create
# 3. GET
# 4. PUT to update
# 5. DELETE
my $res;
my $req;
diag "DELETE request";
$res = $client->delete_object($bucket, $key);
$req = $res->request;
is $res->code, 204;
is $req->method, "DELETE";
is $req->content, '';
is $req->uri, "$protocol://$host/dir/s3test.txt";
xt/94_virtual_host.t
is $req->uri, "$protocol://$host/dir/s3test.txt";
diag "GET request";
$res = $client->get_object($bucket, $key);
$req = $res->request;
ok !$res->is_success, "is not success";
is $res->code, 404;
is $req->method, "GET";
is $req->uri, "$protocol://$host/dir/s3test.txt";
diag "PUT request";
$res = $client->put_object($bucket, $key, $body);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "PUT";
is $req->content, $body;
is $req->uri, "$protocol://$host/dir/s3test.txt";
diag "HEAD request";
$res = $client->head_object($bucket, $key);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "HEAD";
is $req->content, '';
is $req->uri, "$protocol://$host/dir/s3test.txt";
like $res->header("x-amz-request-id"), qr/.+/, "has proper headers";
diag "COPY request";
my $key2 = $key . "_copied";
$res = $client->copy_object($bucket, $key, $bucket, $key2);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "PUT";
diag "GET request";
$res = $client->get_object($bucket, $key2);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "GET";
is $req->uri, "$protocol://$host/dir/s3test.txt_copied";
xt/95_delete_multiple_objects.t
my $protocol = $use_https ? 'https' : 'http';
my $client = Amazon::S3::Thin->new($arg);
my $key1 = "dir/s3test_1.txt";
my $key2 = "dir/s3test_2.txt";
my $body = "hello amazon s3";
my $res;
my $req;
diag "PUT request";
$res = $client->put_object($bucket, $key1, $body);
ok $res->is_success, "is_success";
$res = $client->put_object($bucket, $key2, $body);
ok $res->is_success, "is_success";
diag "DELETE request";
$res = $client->delete_multiple_objects($bucket, $key1, $key2);
ok $res->is_success, "is_success";
$req = $res->request;
is $req->method, "POST";