App-Fetchware
view release on metacpan or search on metacpan
lib/Test/Fetchware.pm view on Meta::CPAN
package Test::Fetchware;
our $VERSION = '1.016'; # VERSION: generated by DZP::OurPkgVersion
# ABSTRACT: Provides testing subroutines for App::Fetchware.
use strict;
use warnings;
# CPAN modules making Fetchwarefile better.
use File::Temp 'tempdir';
use File::Spec::Functions qw(catfile rel2abs updir tmpdir);
use Test::More 0.98; # some utility test subroutines need it.
use Cwd;
use Archive::Tar;
use Path::Class;
use Digest::MD5;
use Fcntl qw(:flock :mode);
use Perl::OSType 'is_os_type';
use File::Temp 'tempfile';
use File::Path 'remove_tree';
use App::Fetchware::Util ':UTIL';
use App::Fetchware::Config ':CONFIG';
# Enable Perl 6 knockoffs, and use 5.10.1, because smartmatching and other
# things in 5.10 were changed in 5.10.1+.
use 5.010001;
# Set up Exporter to bring App::Fetchware's API to everyone who use's it
# including fetchware's ability to let you rip into its guts, and customize it
# as you need.
use Exporter qw( import );
# By default fetchware exports its configuration file like subroutines and
# fetchware().
#
# These tags go with the override() subroutine, and together allow you to
# replace some or all of fetchware's default behavior to install unusual
# software.
# The :TESTING tag is this module's entire public API: testing helper
# subroutines used by App::Fetchware's own test suite and by Fetchwarefile
# extension authors. Nothing is exported by default; callers must request
# the tag (or individual names) explicitly.
our %EXPORT_TAGS = (
TESTING => [qw(
eval_ok
print_ok
fork_ok
fork_not_ok
skip_all_unless_release_testing
make_clean
make_test_dist
md5sum_file
expected_filename_listing
verbose_on
export_ok
end_ok
add_prefix_if_nonroot
create_test_fetchwarefile
rmdashr_ok
)],
);
# *All* entries in @EXPORT_TAGS must also be in @EXPORT_OK.
# Built mechanically from %EXPORT_TAGS so the two can never drift apart.
our @EXPORT_OK = map {@{$_}} values %EXPORT_TAGS;
# eval_ok($coderef, $expected_exception_text_or_regex, $test_name)
#
# Runs $coderef inside an eval and tests that it threw the expected
# exception. If $expected_exception_text_or_regex is a qr// Regexp, the
# thrown exception is matched with like(); otherwise it is compared
# exactly with is(). BAIL_OUT()s if no exception was thrown at all.
sub eval_ok {
    my ($code, $expected_exception_text_or_regex, $test_name) = @_;
    eval {$code->()};
    # Test if an exception was actually thrown.
    #
    # BUGFIX: after an eval that does *not* die, $@ is defined but set to
    # the empty string, so the old check (not defined $@) could never
    # fire. Test $@ for truthiness instead so a missing exception really
    # does bail out.
    if (not $@) {
        BAIL_OUT("[$test_name]'s provided code did not actually throw an exception");
    }
    # Support regexing the thrown exception's text if needed.
    if (ref $expected_exception_text_or_regex ne 'Regexp') {
        is($@, $expected_exception_text_or_regex, $test_name);
    } else {
        like($@, qr/$expected_exception_text_or_regex/, $test_name);
    }
}
# print_ok($printer_coderef, $expected, $test_name)
#
# Captures everything $printer_coderef prints to STDOUT in this process,
# then compares the captured text against $expected, which may be a plain
# string (compared with is()), a qr// Regexp (compared with like()), or a
# coderef (called with the captured text; its truthiness decides the test
# via ok()). Returns the pass/fail value of the underlying Test::More call.
#
# NOTE(review): if $expected is some other kind of reference, none of the
# branches match and the sub falls off the end returning an empty list --
# presumably callers only ever pass the three documented kinds.
sub print_ok {
my ($printer, $expected, $test_name) = @_;
my $error;
my $stdout;
# Use eval to catch errors that $printer->() could possibly throw.
eval {
# Localize the STDOUT glob so the reopen below is automatically undone
# when this eval block exits, restoring the real STDOUT.
local *STDOUT;
# Turn on Autoflush mode, so each time print is called it causes perl to
# flush STDOUT's buffer. Otherwise a write could happen, that may not
# actually get written before this eval closes, causing $stdout to stay
# undef instead of getting whatever was written to STDOUT.
$| = 1;
# Reopen STDOUT as an in-memory file backed by $stdout, so printed
# output accumulates in the scalar instead of going to the terminal.
open STDOUT, '>', \$stdout
or $error = 'Can\'t open STDOUT to test cmd_upgrade using cmd_list';
# Execute $printer
$printer->();
# close must succeed for the eval to return true; a failure here falls
# through to the "or do" error handler below.
close STDOUT
or $error = 'WTF! closing STDOUT actually failed! Huh?';
} or do {
$error = $@ if $@;
fail($error) if defined $error;
};
# Since Test::More's testing subroutines return true or false if the test
# passes or fails, return this true or false value back to the caller.
if (ref($expected) eq '') {
return is($stdout, $expected,
$test_name);
} elsif (ref($expected) eq 'Regexp') {
return like($stdout, $expected,
$test_name);
} elsif (ref($expected) eq 'CODE') {
# Call the provided callback with what $printer->() printed.
return ok($expected->($stdout),
$test_name);
}
}
# fork_ok($coderef, $test_name)
#
# fork()s, runs $coderef (with no arguments) in the child, and has the
# child exit 0 if $coderef returned true or 1 if it returned false. The
# parent waits for the child, then ok()'s that the child's exit status
# was 0 (success). Returns the raw $? wait status to the caller.
sub fork_ok {
    my ($child_code, $test_name) = @_;

    my $pid = fork;
    die "Couldn't fork: $!\n" unless defined $pid;

    if ($pid == 0) {
        # Child: run the caller's code without any args, and turn its
        # truthiness into an exit status (0 = success, 1 = failure).
        exit($child_code->() ? 0 : 1);
    }

    # Parent: block until the child ($pid) exits, filling in $?.
    waitpid($pid, 0);

    # And test that the child returned successfully.
    ok(($? >> 8) == 0, $test_name);
    return $?;
}
# fork_not_ok($coderef, $test_name)
#
# The inverse of fork_ok(): fork()s, runs $coderef in the child (child
# exits 0 on a true return, 1 on false), and the parent ok()'s that the
# child exited *nonzero*, i.e. that $coderef failed as expected. Returns
# the raw $? wait status.
sub fork_not_ok {
    my ($child_code, $test_name) = @_;

    my $pid = fork;
    die "Couldn't fork: $!\n" unless defined $pid;

    if ($pid == 0) {
        # Child: run the caller's code without any args, and turn its
        # truthiness into an exit status (0 = success, 1 = failure).
        exit($child_code->() ? 0 : 1);
    }

    # Parent: block until the child ($pid) exits, filling in $?.
    waitpid($pid, 0);

    # Check that the child failed and returned nonzero.
    ok(($? >> 8) != 0, $test_name);
    return $?;
}
# skip_all_unless_release_testing()
#
# Skips the enclosing test file or subtest() unless the
# FETCHWARE_RELEASE_TESTING environment variable is set to the exact
# magic string below. The deliberately awkward value makes it hard to
# enable by accident, because release testing *installs* software on
# your computer.
sub skip_all_unless_release_testing {
    my $magic_value =
        '***setting this will install software on your computer!!!!!!!***';
    my $release_testing = $ENV{FETCHWARE_RELEASE_TESTING};

    # A defined-and-equal check covers all three of the original
    # conditions (exists, defined, and string equality): a key whose
    # value is defined necessarily exists, and short-circuiting avoids
    # an undef warning from eq.
    unless (defined $release_testing and $release_testing eq $magic_value) {
        plan skip_all => 'Not testing for release.';
    }
}
# make_clean()
#
# Runs `make clean` in the current directory, then chdir()s to the parent
# directory. Intended for use between test runs inside throwaway testing
# build directories. BAIL_OUT()s if it appears to be running inside
# fetchware's own source tree (detected by the presence of this module
# and its build test file), because `make clean` there would destroy the
# distribution's own build.
sub make_clean {
    BAIL_OUT(<<EOF) if -e 'lib/Test/Fetchware.pm' && -e 't/App-Fetchware-build.t';
Running make_clean() inside of fetchware's own directory! make_clean() should
only be called inside testing build directories, and perhaps also only called if
FETCHWARE_RELEASE_TESTING has been set.
EOF
    # BUGFIX: the return value of system() was previously ignored, so a
    # failing `make clean` went completely unnoticed. Report it as a test
    # failure (but keep going, so the chdir below still restores the cwd).
    system('make', 'clean') == 0
        or fail(qq{make clean failed! Exit status [$?]});
    chdir(updir()) or fail(q{Can't chdir(updir())!});
}
lib/Test/Fetchware.pm view on Meta::CPAN
=item * Then use a regex comparison just like Test::More's like() function.
=back
=item * If $expected is a CODEREF according to ref()
=over
=item * Then execute the coderef with a copy of the $printer's STDOUT and use the result of that expression to determine if the test passed or failed .
=back
=back
=over
NOTICE: C<print_ok()'s> manipulation of STDOUT only works for the current Perl
process. STDOUT may be inherited by forks, but for some reason my knowledge of
Perl and Unix lacks a better explanation other than that print_ok() does not
work for testing what C<fork()ed> and C<exec()ed> processes do such as those
executed with run_prog().
I also have not tested other possibilities, such as using IO::Handle to
manipulate STDOUT, or tie()ing STDOUT like Test::Output does. These methods
probably would not survive a fork() and an exec() though either.
=back
=head2 fork_ok()
fork_ok(&code_fork_should_do, $test_name);
Simply properly forks, and runs the caller's provided coderef in the child,
and tests that the child's exit value is 0 for success using a simple ok() call from
Test::More. The child's exit value is controlled by the caller based on what
&code_fork_should_do returns. If &code_fork_should_do returns true, then the
child returns C<0> for success, and if &code_fork_should_do returns false, then
the child returns C<1> for failure.
Because the fork()ed child is a copy of the current perl process you can still
access whatever Test::More or Test::Fetchware testing subroutines you may have
imported for use in the test file that uses fork_ok().
This testing helper subroutine only exists for testing fetchware's command line
interface. This interface is fetchware's run() subroutine and when you actually
execute the fetchware program from the command line such as C<fetchware help>.
=over
=item WARNING
fork_ok() has a major bug that makes any tests you attempt to run in
&code_fork_should_do that fail never report this failure properly to
Test::Builder. Also, any success is not reported either. This is not fork_ok()'s
fault it is Test::Builder's fault for still not having support for forking. This
lack of support for forking may be fixed in Test::Builder 1.5 or perhaps 2.0,
but those are still in development.
=back
=head2 fork_not_ok()
fork_not_ok(&code_fork_should_do, $test_name);
The exact same thing as fork_ok() except it expects failure and reports true
when the provided coderef returns failure. If the provided coderef returns true,
then it reports failure to the test suite.
The same warnings and problems associated with fork_ok() apply to fork_not_ok().
=head2 skip_all_unless_release_testing()
subtest 'some subtest that tests fetchware' => sub {
skip_all_unless_release_testing();
# ... Your tests go here that will be skipped unless
# FETCHWARE_RELEASE_TESTING among other env vars are set properly.
};
Skips all tests in your test file or subtest() if fetchware's testing
environment variable, C<FETCHWARE_RELEASE_TESTING>, is not set to its proper
value. See L<App::Fetchware/2. Call skip_all_unless_release_testing() as needed>
for more information.
=over
=item WARNING
If you call skip_all_unless_release_testing() in your main test file without
being enclosed inside a subtest, then skip_all_unless_release_testing() will
skip all of your tests from that point on till the end of the file, so be
careful where you use it, or just I<only> use it in subtests to be safe.
=back
=head2 make_clean()
make_clean();
Runs C<make clean> and then chdirs to the parent directory. This subroutine is
used in build() and install()'s test scripts to run make clean in between test
runs. If you override build() or install() you may wish to use make_clean to
automate this for you.
make_clean() also makes some simple checks to ensure that you are not running it
inside of fetchware's own build directory. If it detects this, it BAIL_OUT()'s
of the test file to indicate that the test file has gone crazy, and is about to
do something it shouldn't.
=head2 make_test_dist()
my $test_dist_path = make_test_dist(
file_name => $file_name,
ver_num => $ver_num,
# These are all optional...
destination_directory => rel2abs($destination_directory),
fetchwarefile => $fetchwarefile,
# You can only specify fetchwarefile *or* append_option.
append_option => q{fetchware_option 'some value';},
configure => <<EOF,
#!/bin/sh
# A test ./configure for testing ./configure failure...it always fails.
echo "fetchware: ./configure failed!
# Return failure exit status to truly indicate failure.
exit 1
EOF
makefile => <<EOF,
( run in 1.314 second using v1.01-cache-2.11-cpan-39bf76dae61 )