AWS-S3
view release on metacpan or search on metacpan
README.markdown view on Meta::CPAN
$new_file = $bucket->add_file(
key => 'foo/bar.txt',
contents => sub { return \"This is the contents" }
);
# Get the file:
my $same_file = $bucket->file( 'foo/bar.txt' );
# Get the contents:
my $scalar_ref = $same_file->contents;
print $$scalar_ref;
# Update the contents with a scalar ref:
$same_file->contents( \"New file contents" );
# Update the contents with a code ref:
$same_file->contents( sub { return \"New file contents" } );
# Delete the file:
$same_file->delete();
lib/AWS/S3.pm view on Meta::CPAN
$new_file = $bucket->add_file(
key => 'foo/bar.txt',
contents => sub { return \"This is the contents" }
);
# Get the file:
my $same_file = $bucket->file( 'foo/bar.txt' );
# Get the contents:
my $scalar_ref = $same_file->contents;
print $$scalar_ref;
# Update the contents with a scalar ref:
$same_file->contents( \"New file contents" );
# Update the contents with a code ref:
$same_file->contents( sub { return \"New file contents" } );
# Delete the file:
$same_file->delete();
lib/AWS/S3/File.pm view on Meta::CPAN
=head1 NAME
AWS::S3::File - A single file in Amazon S3
=head1 SYNOPSIS
my $file = $bucket->file('foo/bar.txt');
# contents is a scalarref:
print ${ $file->contents };
print $file->size;
print $file->key;
print $file->etag;
print $file->lastmodified;
print $file->owner->display_name;
print $file->bucket->name;
# Set the contents with a scalarref:
my $new_contents = "This is the new contents of the file.";
$file->contents( \$new_contents );
# Set the contents with a coderef:
$file->contents( sub {
return \$new_contents;
});
# Alternative update
$file->update(
contents => \'New contents', # optional
contenttype => 'text/plain' # optional
);
# Get signed URL for the file for public access
print $file->signed_url( $expiry_time );
# Delete the file:
$file->delete();
=head1 DESCRIPTION
AWS::S3::File provides a convenience wrapper for dealing with files stored in S3.
=head1 PUBLIC PROPERTIES
lib/AWS/S3/FileIterator.pm view on Meta::CPAN
# Optional params:
pattern => qr(\.txt$),
prefix => 'notes',
);
while( my @files = $iter->next_page )
{
warn $iter->page_number, "\n";
foreach my $file ( @files )
{
print "\t", $file->key, "\n";
}# end foreach()
}# end while()
=head1 DESCRIPTION
AWS::S3::FileIterator provides a means of I<iterating> through your S3 files.
If you only have a few files it might seem odd to require an iterator, but if you
have thousands (or millions) of files, the iterator will save you a lot of effort.
t/010_basic.t view on Meta::CPAN
ok ! $bucket->file($filename), 'file no longer exists in bucket';
};
ADD_MANY_FILES: {
my %info = ( );
# Add the files:
for( 0..25 )
{
my $contents = "Contents of file $_\n"x4;
my $key = "bar/baz/foo." . sprintf("%03d", $_) . ".txt";
$info{$key} = $contents;
ok $bucket->add_file(
key => $key,
contents => \$contents,
), "Added file $_";
}# end for()
# Make sure they all worked:
my $counted = 0;
foreach my $key ( sort keys %info )
t/010_basic.t view on Meta::CPAN
}# end foreach()
last;
}# end while()
# Make sure that if we say we want to start on page 11, we *start* on page 11:
$iter = $bucket->files( page_size => 1, page_number => 18 );
SMALL_ITER: {
for( 18..25 )
{
my ($file) = $iter->next_page;
my $number = sprintf('%03d', $_);
is $file->key, "bar/baz/foo.$number.txt", "file $number is what we expected";
}# end for()
};
# How about when our page size is larger than what we get back from S3?:
# $iter = $bucket->files( page_size => 105, page_number => 2 );
# BIG_ITER: {
# my @files = $iter->next_page;
# for( 106..116 )
# {
( run in 0.486 second using v1.01-cache-2.11-cpan-de7293f3b23 )