author     cvs2svn    2005-01-24 15:46:33 +0000
committer  cvs2svn    2005-01-24 15:46:33 +0000
commit     e6a05dbef707dc10e546ef8fef8fc2a8b7d805bf (patch)
tree       7061a2d781348fa7d5965b7d7dd5d7ef6b27a7b0 /Perl/t/Test
parent     e1ed2c6d78bb616e24166c13126adba1b95ea4ea (diff)
This commit was manufactured by cvs2svn to create branch 'POSTSCRIPT'.
Diffstat (limited to 'Perl/t/Test')
-rw-r--r--  Perl/t/Test/Builder.pm            1408
-rw-r--r--  Perl/t/Test/Harness.pm            1168
-rw-r--r--  Perl/t/Test/Harness/Assert.pm       68
-rw-r--r--  Perl/t/Test/Harness/Iterator.pm     61
-rw-r--r--  Perl/t/Test/Harness/Straps.pm      667
-rw-r--r--  Perl/t/Test/More.pm               1248
6 files changed, 0 insertions, 4620 deletions
diff --git a/Perl/t/Test/Builder.pm b/Perl/t/Test/Builder.pm
deleted file mode 100644
index 6f3edd8..0000000
--- a/Perl/t/Test/Builder.pm
+++ /dev/null
@@ -1,1408 +0,0 @@
-package Test::Builder;
-
-use 5.004;
-
-# $^C was only introduced in 5.005-ish. We do this to prevent
-# use of uninitialized value warnings in older perls.
-$^C ||= 0;
-
-use strict;
-use vars qw($VERSION $CLASS);
-$VERSION = '0.17';
-$CLASS = __PACKAGE__;
-
-my $IsVMS = $^O eq 'VMS';
-
-# Make Test::Builder thread-safe for ithreads.
-BEGIN {
- use Config;
- if( $] >= 5.008 && $Config{useithreads} ) {
- require threads;
- require threads::shared;
- threads::shared->import;
- }
- else {
- *share = sub { 0 };
- *lock = sub { 0 };
- }
-}
-
-use vars qw($Level);
-my($Test_Died) = 0;
-my($Have_Plan) = 0;
-my $Original_Pid = $$;
-my $Curr_Test = 0; share($Curr_Test);
-my @Test_Results = (); share(@Test_Results);
-my @Test_Details = (); share(@Test_Details);
-
-
-=head1 NAME
-
-Test::Builder - Backend for building test libraries
-
-=head1 SYNOPSIS
-
- package My::Test::Module;
- use Test::Builder;
- require Exporter;
- @ISA = qw(Exporter);
- @EXPORT = qw(ok);
-
- my $Test = Test::Builder->new;
- $Test->output('my_logfile');
-
- sub import {
- my($self) = shift;
- my $pack = caller;
-
- $Test->exported_to($pack);
- $Test->plan(@_);
-
- $self->export_to_level(1, $self, 'ok');
- }
-
- sub ok {
- my($test, $name) = @_;
-
- $Test->ok($test, $name);
- }
-
-
-=head1 DESCRIPTION
-
-Test::Simple and Test::More have proven to be popular testing modules,
-but they're not always flexible enough. Test::Builder provides a
-building block upon which to write your own test libraries I<which can
-work together>.
-
-=head2 Construction
-
-=over 4
-
-=item B<new>
-
- my $Test = Test::Builder->new;
-
-Returns a Test::Builder object representing the current state of the
-test.
-
-Since you only run one test per program, there is B<one and only one>
-Test::Builder object. No matter how many times you call new(), you're
-getting the same object. (This is called a singleton).
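-
-For example, two separate modules asking for a builder both get the
-same object back:
-
-    my $a = Test::Builder->new;
-    my $b = Test::Builder->new;
-    # $a and $b refer to the same object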
-
-=cut
-
-my $Test;
-sub new {
- my($class) = shift;
- $Test ||= bless ['Move along, nothing to see here'], $class;
- return $Test;
-}
-
-=back
-
-=head2 Setting up tests
-
-These methods are for setting up tests and declaring how many there
-are. You usually only want to call one of these methods.
-
-=over 4
-
-=item B<exported_to>
-
- my $pack = $Test->exported_to;
- $Test->exported_to($pack);
-
-Tells Test::Builder what package you exported your functions to.
-This is important for getting TODO tests right.
-
-=cut
-
-my $Exported_To;
-sub exported_to {
- my($self, $pack) = @_;
-
- if( defined $pack ) {
- $Exported_To = $pack;
- }
- return $Exported_To;
-}
-
-=item B<plan>
-
- $Test->plan('no_plan');
- $Test->plan( skip_all => $reason );
- $Test->plan( tests => $num_tests );
-
-A convenient way to set up your tests. Call this and Test::Builder
-will print the appropriate headers and take the appropriate actions.
-
-If you call plan(), don't call any of the other methods below.
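-
-For instance, a wrapper might declare its plan in one of these ways
-(the count and reason here are only illustrative):
-
-    $Test->plan( tests => 5 );
-    # or
-    $Test->plan( skip_all => 'these tests need a TTY' );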
-
-=cut
-
-sub plan {
- my($self, $cmd, $arg) = @_;
-
- return unless $cmd;
-
- if( $Have_Plan ) {
- die sprintf "You tried to plan twice! Second plan at %s line %d\n",
- ($self->caller)[1,2];
- }
-
- if( $cmd eq 'no_plan' ) {
- $self->no_plan;
- }
- elsif( $cmd eq 'skip_all' ) {
- return $self->skip_all($arg);
- }
- elsif( $cmd eq 'tests' ) {
- if( $arg ) {
- return $self->expected_tests($arg);
- }
- elsif( !defined $arg ) {
- die "Got an undefined number of tests. Looks like you tried to ".
- "say how many tests you plan to run but made a mistake.\n";
- }
- elsif( !$arg ) {
- die "You said to run 0 tests! You've got to run something.\n";
- }
- }
- else {
- require Carp;
- my @args = grep { defined } ($cmd, $arg);
- Carp::croak("plan() doesn't understand @args");
- }
-
- return 1;
-}
-
-=item B<expected_tests>
-
- my $max = $Test->expected_tests;
- $Test->expected_tests($max);
-
-Gets/sets the # of tests we expect this test to run and prints out
-the appropriate headers.
-
-=cut
-
-my $Expected_Tests = 0;
-sub expected_tests {
- my($self, $max) = @_;
-
- if( defined $max ) {
- $Expected_Tests = $max;
- $Have_Plan = 1;
-
- $self->_print("1..$max\n") unless $self->no_header;
- }
- return $Expected_Tests;
-}
-
-
-=item B<no_plan>
-
- $Test->no_plan;
-
-Declares that this test will run an indeterminate # of tests.
-
-=cut
-
-my($No_Plan) = 0;
-sub no_plan {
- $No_Plan = 1;
- $Have_Plan = 1;
-}
-
-=item B<has_plan>
-
- $plan = $Test->has_plan
-
-Find out whether a plan has been defined. $plan is either C<undef> (no plan has been set), C<no_plan> (indeterminate # of tests) or an integer (the number of expected tests).
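-
-As a sketch, a library that only wants to set a plan when none exists
-yet might do:
-
-    $Test->plan( tests => 10 ) unless defined $Test->has_plan;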
-
-=cut
-
-sub has_plan {
- return($Expected_Tests) if $Expected_Tests;
- return('no_plan') if $No_Plan;
- return(undef);
-};
-
-
-=item B<skip_all>
-
- $Test->skip_all;
- $Test->skip_all($reason);
-
-Skips all the tests, using the given $reason. Exits immediately with 0.
-
-=cut
-
-my $Skip_All = 0;
-sub skip_all {
- my($self, $reason) = @_;
-
- my $out = "1..0";
- $out .= " # Skip $reason" if $reason;
- $out .= "\n";
-
- $Skip_All = 1;
-
- $self->_print($out) unless $self->no_header;
- exit(0);
-}
-
-=back
-
-=head2 Running tests
-
-These actually run the tests, analogous to the functions in
-Test::More.
-
-$name is always optional.
-
-=over 4
-
-=item B<ok>
-
- $Test->ok($test, $name);
-
-Your basic test. Pass if $test is true, fail if $test is false. Just
-like Test::Simple's ok().
-
-=cut
-
-sub ok {
- my($self, $test, $name) = @_;
-
- # $test might contain an object which we don't want to accidentally
- # store, so we turn it into a boolean.
- $test = $test ? 1 : 0;
-
- unless( $Have_Plan ) {
- require Carp;
- Carp::croak("You tried to run a test without a plan! Gotta have a plan.");
- }
-
- lock $Curr_Test;
- $Curr_Test++;
-
- $self->diag(<<ERR) if defined $name and $name =~ /^[\d\s]+$/;
- You named your test '$name'. You shouldn't use numbers for your test names.
- Very confusing.
-ERR
-
- my($pack, $file, $line) = $self->caller;
-
- my $todo = $self->todo($pack);
-
- my $out;
- my $result = {};
- share($result);
-
- unless( $test ) {
- $out .= "not ";
- @$result{ 'ok', 'actual_ok' } = ( ( $todo ? 1 : 0 ), 0 );
- }
- else {
- @$result{ 'ok', 'actual_ok' } = ( 1, $test );
- }
-
- $out .= "ok";
- $out .= " $Curr_Test" if $self->use_numbers;
-
- if( defined $name ) {
- $name =~ s|#|\\#|g; # # in a name can confuse Test::Harness.
- $out .= " - $name";
- $result->{name} = $name;
- }
- else {
- $result->{name} = '';
- }
-
- if( $todo ) {
- my $what_todo = $todo;
- $out .= " # TODO $what_todo";
- $result->{reason} = $what_todo;
- $result->{type} = 'todo';
- }
- else {
- $result->{reason} = '';
- $result->{type} = '';
- }
-
- $Test_Results[$Curr_Test-1] = $result;
- $out .= "\n";
-
- $self->_print($out);
-
- unless( $test ) {
- my $msg = $todo ? "Failed (TODO)" : "Failed";
- $self->diag(" $msg test ($file at line $line)\n");
- }
-
- return $test ? 1 : 0;
-}
-
-=item B<is_eq>
-
- $Test->is_eq($got, $expected, $name);
-
-Like Test::More's is(). Checks if $got eq $expected. This is the
-string version.
-
-=item B<is_num>
-
- $Test->is_num($got, $expected, $name);
-
-Like Test::More's is(). Checks if $got == $expected. This is the
-numeric version.
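-
-For example (the variables and values are only illustrative):
-
-    $Test->is_eq($greeting, 'hello', 'greeting matches');
-    $Test->is_num($count,   5,       'count is five');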
-
-=cut
-
-sub is_eq {
- my($self, $got, $expect, $name) = @_;
- local $Level = $Level + 1;
-
- if( !defined $got || !defined $expect ) {
- # undef only matches undef and nothing else
- my $test = !defined $got && !defined $expect;
-
- $self->ok($test, $name);
- $self->_is_diag($got, 'eq', $expect) unless $test;
- return $test;
- }
-
- return $self->cmp_ok($got, 'eq', $expect, $name);
-}
-
-sub is_num {
- my($self, $got, $expect, $name) = @_;
- local $Level = $Level + 1;
-
- if( !defined $got || !defined $expect ) {
- # undef only matches undef and nothing else
- my $test = !defined $got && !defined $expect;
-
- $self->ok($test, $name);
- $self->_is_diag($got, '==', $expect) unless $test;
- return $test;
- }
-
- return $self->cmp_ok($got, '==', $expect, $name);
-}
-
-sub _is_diag {
- my($self, $got, $type, $expect) = @_;
-
- foreach my $val (\$got, \$expect) {
- if( defined $$val ) {
- if( $type eq 'eq' ) {
- # quote and force string context
- $$val = "'$$val'"
- }
- else {
- # force numeric context
- $$val = $$val+0;
- }
- }
- else {
- $$val = 'undef';
- }
- }
-
- return $self->diag(sprintf <<DIAGNOSTIC, $got, $expect);
- got: %s
- expected: %s
-DIAGNOSTIC
-
-}
-
-=item B<isnt_eq>
-
- $Test->isnt_eq($got, $dont_expect, $name);
-
-Like Test::More's isnt(). Checks if $got ne $dont_expect. This is
-the string version.
-
-=item B<isnt_num>
-
- $Test->isnt_num($got, $dont_expect, $name);
-
-Like Test::More's isnt(). Checks if $got != $dont_expect. This is
-the numeric version.
-
-=cut
-
-sub isnt_eq {
- my($self, $got, $dont_expect, $name) = @_;
- local $Level = $Level + 1;
-
- if( !defined $got || !defined $dont_expect ) {
- # undef only matches undef and nothing else
- my $test = defined $got || defined $dont_expect;
-
- $self->ok($test, $name);
- $self->_cmp_diag('ne', $got, $dont_expect) unless $test;
- return $test;
- }
-
- return $self->cmp_ok($got, 'ne', $dont_expect, $name);
-}
-
-sub isnt_num {
- my($self, $got, $dont_expect, $name) = @_;
- local $Level = $Level + 1;
-
- if( !defined $got || !defined $dont_expect ) {
- # undef only matches undef and nothing else
- my $test = defined $got || defined $dont_expect;
-
- $self->ok($test, $name);
- $self->_cmp_diag('!=', $got, $dont_expect) unless $test;
- return $test;
- }
-
- return $self->cmp_ok($got, '!=', $dont_expect, $name);
-}
-
-
-=item B<like>
-
- $Test->like($this, qr/$regex/, $name);
- $Test->like($this, '/$regex/', $name);
-
-Like Test::More's like(). Checks if $this matches the given $regex.
-
-You'll want to avoid qr// if you want your tests to work before 5.005.
-
-=item B<unlike>
-
- $Test->unlike($this, qr/$regex/, $name);
- $Test->unlike($this, '/$regex/', $name);
-
-Like Test::More's unlike(). Checks if $this B<does not match> the
-given $regex.
-
-=cut
-
-sub like {
- my($self, $this, $regex, $name) = @_;
-
- local $Level = $Level + 1;
- $self->_regex_ok($this, $regex, '=~', $name);
-}
-
-sub unlike {
- my($self, $this, $regex, $name) = @_;
-
- local $Level = $Level + 1;
- $self->_regex_ok($this, $regex, '!~', $name);
-}
-
-=item B<maybe_regex>
-
- $Test->maybe_regex(qr/$regex/);
- $Test->maybe_regex('/$regex/');
-
-Convenience method for building testing functions that take regular
-expressions as arguments, but need to work before perl 5.005.
-
-Takes a quoted regular expression produced by qr//, or a string
-representing a regular expression.
-
-Returns a Perl value which may be used instead of the corresponding
-regular expression, or undef if its argument is not recognised.
-
-For example, a version of like(), sans the useful diagnostic messages,
-could be written as:
-
- sub laconic_like {
- my ($self, $this, $regex, $name) = @_;
- my $usable_regex = $self->maybe_regex($regex);
- die "expecting regex, found '$regex'\n"
- unless $usable_regex;
- $self->ok($this =~ m/$usable_regex/, $name);
- }
-
-=cut
-
-
-sub maybe_regex {
- my ($self, $regex) = @_;
- my $usable_regex = undef;
- if( ref $regex eq 'Regexp' ) {
- $usable_regex = $regex;
- }
- # Check if it looks like '/foo/'
- elsif( my($re, $opts) = $regex =~ m{^ /(.*)/ (\w*) $ }sx ) {
- $usable_regex = length $opts ? "(?$opts)$re" : $re;
- };
- return($usable_regex)
-};
-
-sub _regex_ok {
- my($self, $this, $regex, $cmp, $name) = @_;
-
- local $Level = $Level + 1;
-
- my $ok = 0;
- my $usable_regex = $self->maybe_regex($regex);
- unless (defined $usable_regex) {
- $ok = $self->ok( 0, $name );
- $self->diag(" '$regex' doesn't look much like a regex to me.");
- return $ok;
- }
-
- {
- local $^W = 0;
- my $test = $this =~ /$usable_regex/ ? 1 : 0;
- $test = !$test if $cmp eq '!~';
- $ok = $self->ok( $test, $name );
- }
-
- unless( $ok ) {
- $this = defined $this ? "'$this'" : 'undef';
- my $match = $cmp eq '=~' ? "doesn't match" : "matches";
- $self->diag(sprintf <<DIAGNOSTIC, $this, $match, $regex);
- %s
- %13s '%s'
-DIAGNOSTIC
-
- }
-
- return $ok;
-}
-
-=item B<cmp_ok>
-
- $Test->cmp_ok($this, $type, $that, $name);
-
-Works just like Test::More's cmp_ok().
-
- $Test->cmp_ok($big_num, '!=', $other_big_num);
-
-=cut
-
-sub cmp_ok {
- my($self, $got, $type, $expect, $name) = @_;
-
- my $test;
- {
- local $^W = 0;
- local($@,$!); # don't interfere with $@
- # eval() sometimes resets $!
- $test = eval "\$got $type \$expect";
- }
- local $Level = $Level + 1;
- my $ok = $self->ok($test, $name);
-
- unless( $ok ) {
- if( $type =~ /^(eq|==)$/ ) {
- $self->_is_diag($got, $type, $expect);
- }
- else {
- $self->_cmp_diag($got, $type, $expect);
- }
- }
- return $ok;
-}
-
-sub _cmp_diag {
- my($self, $got, $type, $expect) = @_;
-
- $got = defined $got ? "'$got'" : 'undef';
- $expect = defined $expect ? "'$expect'" : 'undef';
- return $self->diag(sprintf <<DIAGNOSTIC, $got, $type, $expect);
- %s
- %s
- %s
-DIAGNOSTIC
-}
-
-=item B<BAILOUT>
-
- $Test->BAILOUT($reason);
-
-Indicates to Test::Harness that things are going so badly that all
-testing should terminate. This includes running any additional test
-scripts.
-
-It will exit with 255.
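-
-A sketch, using a hypothetical $have_dbi flag:
-
-    $Test->BAILOUT("DBI is not installed") unless $have_dbi;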
-
-=cut
-
-sub BAILOUT {
- my($self, $reason) = @_;
-
- $self->_print("Bail out! $reason");
- exit 255;
-}
-
-=item B<skip>
-
- $Test->skip;
- $Test->skip($why);
-
-Skips the current test, reporting $why.
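-
-For example, skipping a block of tests (the count and reason are
-hypothetical):
-
-    $Test->skip("no network available") for 1..5;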
-
-=cut
-
-sub skip {
- my($self, $why) = @_;
- $why ||= '';
-
- unless( $Have_Plan ) {
- require Carp;
- Carp::croak("You tried to run tests without a plan! Gotta have a plan.");
- }
-
- lock($Curr_Test);
- $Curr_Test++;
-
- my %result;
- share(%result);
- %result = (
- 'ok' => 1,
- actual_ok => 1,
- name => '',
- type => 'skip',
- reason => $why,
- );
- $Test_Results[$Curr_Test-1] = \%result;
-
- my $out = "ok";
- $out .= " $Curr_Test" if $self->use_numbers;
- $out .= " # skip $why\n";
-
- $Test->_print($out);
-
- return 1;
-}
-
-
-=item B<todo_skip>
-
- $Test->todo_skip;
- $Test->todo_skip($why);
-
-Like skip(), only it will declare the test as failing and TODO. Similar
-to
-
- print "not ok $tnum # TODO $why\n";
-
-=cut
-
-sub todo_skip {
- my($self, $why) = @_;
- $why ||= '';
-
- unless( $Have_Plan ) {
- require Carp;
- Carp::croak("You tried to run tests without a plan! Gotta have a plan.");
- }
-
- lock($Curr_Test);
- $Curr_Test++;
-
- my %result;
- share(%result);
- %result = (
- 'ok' => 1,
- actual_ok => 0,
- name => '',
- type => 'todo_skip',
- reason => $why,
- );
-
- $Test_Results[$Curr_Test-1] = \%result;
-
- my $out = "not ok";
- $out .= " $Curr_Test" if $self->use_numbers;
- $out .= " # TODO & SKIP $why\n";
-
- $Test->_print($out);
-
- return 1;
-}
-
-
-=begin _unimplemented
-
-=item B<skip_rest>
-
- $Test->skip_rest;
- $Test->skip_rest($reason);
-
-Like skip(), only it skips all the rest of the tests you plan to run
-and terminates the test.
-
-If you're running under no_plan, it skips once and terminates the
-test.
-
-=end _unimplemented
-
-=back
-
-
-=head2 Test style
-
-=over 4
-
-=item B<level>
-
- $Test->level($how_high);
-
-How far up the call stack $Test should look when reporting where the
-test failed.
-
-Defaults to 1.
-
-Setting $Test::Builder::Level overrides this. It is most useful when
-localized:
-
- {
- local $Test::Builder::Level = 2;
- $Test->ok($test);
- }
-
-=cut
-
-sub level {
- my($self, $level) = @_;
-
- if( defined $level ) {
- $Level = $level;
- }
- return $Level;
-}
-
-$CLASS->level(1);
-
-
-=item B<use_numbers>
-
- $Test->use_numbers($on_or_off);
-
-Whether or not the test should output numbers. That is, it outputs this if true:
-
- ok 1
- ok 2
- ok 3
-
-or this if false:
-
- ok
- ok
- ok
-
-Most useful when you can't depend on the test output order, such as
-when threads or forking is involved.
-
-Test::Harness will accept either, but avoid mixing the two styles.
-
-Defaults to on.
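-
-A sketch of switching numbering off before any tests run:
-
-    $Test->use_numbers(0);
-    $Test->ok(1, 'first');    # prints "ok - first"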
-
-=cut
-
-my $Use_Nums = 1;
-sub use_numbers {
- my($self, $use_nums) = @_;
-
- if( defined $use_nums ) {
- $Use_Nums = $use_nums;
- }
- return $Use_Nums;
-}
-
-=item B<no_header>
-
- $Test->no_header($no_header);
-
-If set to true, no "1..N" header will be printed.
-
-=item B<no_ending>
-
- $Test->no_ending($no_ending);
-
-Normally, Test::Builder does some extra diagnostics when the test
-ends. It also changes the exit code as described in Test::Simple.
-
-If this is true, none of that will be done.
-
-=cut
-
-my($No_Header, $No_Ending) = (0,0);
-sub no_header {
- my($self, $no_header) = @_;
-
- if( defined $no_header ) {
- $No_Header = $no_header;
- }
- return $No_Header;
-}
-
-sub no_ending {
- my($self, $no_ending) = @_;
-
- if( defined $no_ending ) {
- $No_Ending = $no_ending;
- }
- return $No_Ending;
-}
-
-
-=back
-
-=head2 Output
-
-Controlling where the test output goes.
-
-It's ok for your test to change where STDOUT and STDERR point to;
-Test::Builder's default output settings will not be affected.
-
-=over 4
-
-=item B<diag>
-
- $Test->diag(@msgs);
-
-Prints out the given @msgs. Normally, it uses the failure_output()
-handle, but if this is for a TODO test, the todo_output() handle is
-used.
-
-Output will be indented and marked with a # so as not to interfere
-with test output. A newline will be put on the end if there isn't one
-already.
-
-We encourage using this rather than calling print directly.
-
-Returns false. Why? Because diag() is often used in conjunction with
-a failing test (C<ok() || diag()>), so it "passes through" the failure.
-
- return ok(...) || diag(...);
-
-=for blame transfer
-Mark Fowler <mark@twoshortplanks.com>
-
-=cut
-
-sub diag {
- my($self, @msgs) = @_;
- return unless @msgs;
-
- # Prevent printing headers when compiling (i.e. -c)
- return if $^C;
-
- # Escape each line with a #.
- foreach (@msgs) {
- $_ = 'undef' unless defined;
- s/^/# /gms;
- }
-
- push @msgs, "\n" unless $msgs[-1] =~ /\n\Z/;
-
- local $Level = $Level + 1;
- my $fh = $self->todo ? $self->todo_output : $self->failure_output;
- local($\, $", $,) = (undef, ' ', '');
- print $fh @msgs;
-
- return 0;
-}
-
-=begin _private
-
-=item B<_print>
-
- $Test->_print(@msgs);
-
-Prints to the output() filehandle.
-
-=end _private
-
-=cut
-
-sub _print {
- my($self, @msgs) = @_;
-
- # Prevent printing headers when only compiling. Mostly for when
- # tests are deparsed with B::Deparse
- return if $^C;
-
- local($\, $", $,) = (undef, ' ', '');
- my $fh = $self->output;
-
- # Escape each line after the first with a # so we don't
- # confuse Test::Harness.
- foreach (@msgs) {
- s/\n(.)/\n# $1/sg;
- }
-
- push @msgs, "\n" unless $msgs[-1] =~ /\n\Z/;
-
- print $fh @msgs;
-}
-
-
-=item B<output>
-
- $Test->output($fh);
- $Test->output($file);
-
-Where normal "ok/not ok" test output should go.
-
-Defaults to STDOUT.
-
-=item B<failure_output>
-
- $Test->failure_output($fh);
- $Test->failure_output($file);
-
-Where diagnostic output on test failures and diag() should go.
-
-Defaults to STDERR.
-
-=item B<todo_output>
-
- $Test->todo_output($fh);
- $Test->todo_output($file);
-
-Where diagnostics about todo test failures and diag() should go.
-
-Defaults to STDOUT.
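-
-For example, to log normal output and failures to files instead (the
-file names are only illustrative):
-
-    $Test->output('my_test.log');
-    $Test->failure_output('my_test.err');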
-
-=cut
-
-my($Out_FH, $Fail_FH, $Todo_FH);
-sub output {
- my($self, $fh) = @_;
-
- if( defined $fh ) {
- $Out_FH = _new_fh($fh);
- }
- return $Out_FH;
-}
-
-sub failure_output {
- my($self, $fh) = @_;
-
- if( defined $fh ) {
- $Fail_FH = _new_fh($fh);
- }
- return $Fail_FH;
-}
-
-sub todo_output {
- my($self, $fh) = @_;
-
- if( defined $fh ) {
- $Todo_FH = _new_fh($fh);
- }
- return $Todo_FH;
-}
-
-sub _new_fh {
- my($file_or_fh) = shift;
-
- my $fh;
- unless( UNIVERSAL::isa($file_or_fh, 'GLOB') ) {
- $fh = do { local *FH };
- open $fh, ">$file_or_fh" or
- die "Can't open test output log $file_or_fh: $!";
- }
- else {
- $fh = $file_or_fh;
- }
-
- return $fh;
-}
-
-unless( $^C ) {
- # We dup STDOUT and STDERR so people can change them in their
- # test suites while still getting normal test output.
- open(TESTOUT, ">&STDOUT") or die "Can't dup STDOUT: $!";
- open(TESTERR, ">&STDERR") or die "Can't dup STDERR: $!";
-
- # Set everything to unbuffered else plain prints to STDOUT will
- # come out in the wrong order from our own prints.
- _autoflush(\*TESTOUT);
- _autoflush(\*STDOUT);
- _autoflush(\*TESTERR);
- _autoflush(\*STDERR);
-
- $CLASS->output(\*TESTOUT);
- $CLASS->failure_output(\*TESTERR);
- $CLASS->todo_output(\*TESTOUT);
-}
-
-sub _autoflush {
- my($fh) = shift;
- my $old_fh = select $fh;
- $| = 1;
- select $old_fh;
-}
-
-
-=back
-
-
-=head2 Test Status and Info
-
-=over 4
-
-=item B<current_test>
-
- my $curr_test = $Test->current_test;
- $Test->current_test($num);
-
-Gets/sets the current test # we're on.
-
-You usually shouldn't have to set this.
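-
-One case where setting it helps is after printing raw test lines
-yourself (a sketch):
-
-    print "ok ", $Test->current_test + 1, " - printed by hand\n";
-    $Test->current_test( $Test->current_test + 1 );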
-
-=cut
-
-sub current_test {
- my($self, $num) = @_;
-
- lock($Curr_Test);
- if( defined $num ) {
- unless( $Have_Plan ) {
- require Carp;
- Carp::croak("Can't change the current test number without a plan!");
- }
-
- $Curr_Test = $num;
- if( $num > @Test_Results ) {
- my $start = @Test_Results ? $#Test_Results + 1 : 0;
- for ($start..$num-1) {
- my %result;
- share(%result);
- %result = ( ok => 1,
- actual_ok => undef,
- reason => 'incrementing test number',
- type => 'unknown',
- name => undef
- );
- $Test_Results[$_] = \%result;
- }
- }
- }
- return $Curr_Test;
-}
-
-
-=item B<summary>
-
- my @tests = $Test->summary;
-
-A simple summary of the tests so far. True for pass, false for fail.
-This is a logical pass/fail, so todos are passes.
-
-Of course, test #1 is $tests[0], etc...
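-
-For example, counting the failures so far (a sketch):
-
-    my @tests  = $Test->summary;
-    my $failed = grep { !$_ } @tests;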
-
-=cut
-
-sub summary {
- my($self) = shift;
-
- return map { $_->{'ok'} } @Test_Results;
-}
-
-=item B<details>
-
- my @tests = $Test->details;
-
-Like summary(), but with a lot more detail.
-
- $tests[$test_num - 1] =
- { 'ok' => is the test considered a pass?
- actual_ok => did it literally say 'ok'?
- name => name of the test (if any)
- type => type of test (if any, see below).
- reason => reason for the above (if any)
- };
-
-'ok' is true if Test::Harness will consider the test to be a pass.
-
-'actual_ok' is a reflection of whether or not the test literally
-printed 'ok' or 'not ok'. This is for examining the result of 'todo'
-tests.
-
-'name' is the name of the test.
-
-'type' indicates if it was a special test. Normal tests have a type
-of ''. Type can be one of the following:
-
- skip see skip()
- todo see todo()
- todo_skip see todo_skip()
- unknown see below
-
-Sometimes the Test::Builder test counter is incremented without it
-printing any test output, for example, when current_test() is changed.
-In these cases, Test::Builder doesn't know the result of the test, so
-its type is 'unknown'. The details for these tests are filled in:
-they are considered ok, but the name and actual_ok are left undef.
-
-For example "not ok 23 - hole count # TODO insufficient donuts" would
-result in this structure:
-
- $tests[22] = # 23 - 1, since arrays start from 0.
- { ok => 1, # logically, the test passed since it's todo
- actual_ok => 0, # in absolute terms, it failed
- name => 'hole count',
- type => 'todo',
- reason => 'insufficient donuts'
- };
-
-=cut
-
-sub details {
- return @Test_Results;
-}
-
-=item B<todo>
-
- my $todo_reason = $Test->todo;
- my $todo_reason = $Test->todo($pack);
-
-todo() looks for a $TODO variable in your tests. If set, all tests
-will be considered 'todo' (see Test::More and Test::Harness for
-details). Returns the reason (ie. the value of $TODO) if running as
-todo tests, false otherwise.
-
-todo() is pretty smart about finding the right package to look for
-$TODO in. It uses the exported_to() package to find it. If that's
-not set, it's pretty good at guessing the right package to look at.
-
-Sometimes there is some confusion about where todo() should be looking
-for the $TODO variable. If you want to be sure, tell it explicitly
-what $pack to use.
-
-=cut
-
-sub todo {
- my($self, $pack) = @_;
-
- $pack = $pack || $self->exported_to || $self->caller(1);
-
- no strict 'refs';
- return defined ${$pack.'::TODO'} ? ${$pack.'::TODO'}
- : 0;
-}
-
-=item B<caller>
-
- my $package = $Test->caller;
- my($pack, $file, $line) = $Test->caller;
- my($pack, $file, $line) = $Test->caller($height);
-
-Like the normal caller(), except it reports according to your level().
-
-=cut
-
-sub caller {
- my($self, $height) = @_;
- $height ||= 0;
-
- my @caller = CORE::caller($self->level + $height + 1);
- return wantarray ? @caller : $caller[0];
-}
-
-=back
-
-=cut
-
-=begin _private
-
-=over 4
-
-=item B<_sanity_check>
-
- _sanity_check();
-
-Runs a bunch of end of test sanity checks to make sure reality came
-through ok. If anything is wrong it will die with a fairly friendly
-error message.
-
-=cut
-
-#'#
-sub _sanity_check {
- _whoa($Curr_Test < 0, 'Says here you ran a negative number of tests!');
- _whoa(!$Have_Plan and $Curr_Test,
- 'Somehow your tests ran without a plan!');
- _whoa($Curr_Test != @Test_Results,
- 'Somehow you got a different number of results than tests ran!');
-}
-
-=item B<_whoa>
-
- _whoa($check, $description);
-
-A sanity check, similar to assert(). If the $check is true, something
-has gone horribly wrong. It will die with the given $description and
-a note to contact the author.
-
-=cut
-
-sub _whoa {
- my($check, $desc) = @_;
- if( $check ) {
- die <<WHOA;
-WHOA! $desc
-This should never happen! Please contact the author immediately!
-WHOA
- }
-}
-
-=item B<_my_exit>
-
- _my_exit($exit_num);
-
-Perl seems to have some trouble with exiting inside an END block. 5.005_03
-and 5.6.1 both seem to do odd things. Instead, this function edits $?
-directly. It should ONLY be called from inside an END block. It
-doesn't actually exit, that's your job.
-
-=cut
-
-sub _my_exit {
- $? = $_[0];
-
- return 1;
-}
-
-
-=back
-
-=end _private
-
-=cut
-
-$SIG{__DIE__} = sub {
- # We don't want to muck with death in an eval, but $^S isn't
- # totally reliable. 5.005_03 and 5.6.1 both do the wrong thing
- # with it. Instead, we use caller. This also means it runs under
- # 5.004!
- my $in_eval = 0;
- for( my $stack = 1; my $sub = (CORE::caller($stack))[3]; $stack++ ) {
- $in_eval = 1 if $sub =~ /^\(eval\)/;
- }
- $Test_Died = 1 unless $in_eval;
-};
-
-sub _ending {
- my $self = shift;
-
- _sanity_check();
-
- # Don't bother with an ending if this is a forked copy. Only the parent
- # should do the ending.
- do{ _my_exit($?) && return } if $Original_Pid != $$;
-
- # Bailout if plan() was never called. This is so
- # "require Test::Simple" doesn't puke.
- do{ _my_exit(0) && return } if !$Have_Plan && !$Test_Died;
-
- # Figure out if we passed or failed and print helpful messages.
- if( @Test_Results ) {
- # The plan? We have no plan.
- if( $No_Plan ) {
- $self->_print("1..$Curr_Test\n") unless $self->no_header;
- $Expected_Tests = $Curr_Test;
- }
-
- # 5.8.0 threads bug. Shared arrays will not be auto-extended
- # by a slice. Worse, we have to fill in every entry else
- # we'll get an "Invalid value for shared scalar" error
- for my $idx ($#Test_Results..$Expected_Tests-1) {
- my %empty_result = ();
- share(%empty_result);
- $Test_Results[$idx] = \%empty_result
- unless defined $Test_Results[$idx];
- }
-
- my $num_failed = grep !$_->{'ok'}, @Test_Results[0..$Expected_Tests-1];
- $num_failed += abs($Expected_Tests - @Test_Results);
-
- if( $Curr_Test < $Expected_Tests ) {
- $self->diag(<<"FAIL");
-Looks like you planned $Expected_Tests tests but only ran $Curr_Test.
-FAIL
- }
- elsif( $Curr_Test > $Expected_Tests ) {
- my $num_extra = $Curr_Test - $Expected_Tests;
- $self->diag(<<"FAIL");
-Looks like you planned $Expected_Tests tests but ran $num_extra extra.
-FAIL
- }
- elsif ( $num_failed ) {
- $self->diag(<<"FAIL");
-Looks like you failed $num_failed tests of $Expected_Tests.
-FAIL
- }
-
- if( $Test_Died ) {
- $self->diag(<<"FAIL");
-Looks like your test died just after $Curr_Test.
-FAIL
-
- _my_exit( 255 ) && return;
- }
-
- _my_exit( $num_failed <= 254 ? $num_failed : 254 ) && return;
- }
- elsif ( $Skip_All ) {
- _my_exit( 0 ) && return;
- }
- elsif ( $Test_Died ) {
- $self->diag(<<'FAIL');
-Looks like your test died before it could output anything.
-FAIL
- }
- else {
- $self->diag("No tests run!\n");
- _my_exit( 255 ) && return;
- }
-}
-
-END {
- $Test->_ending if defined $Test and !$Test->no_ending;
-}
-
-=head1 THREADS
-
-In perl 5.8.0 and later, Test::Builder is thread-safe. The test
-number is shared amongst all threads. This means if one thread sets
-the test number using current_test(), they will all be affected.
-
-=head1 EXAMPLES
-
-CPAN can provide the best examples. Test::Simple, Test::More,
-Test::Exception and Test::Differences all use Test::Builder.
-
-=head1 SEE ALSO
-
-Test::Simple, Test::More, Test::Harness
-
-=head1 AUTHORS
-
-Original code by chromatic, maintained by Michael G Schwern
-E<lt>schwern@pobox.comE<gt>
-
-=head1 COPYRIGHT
-
-Copyright 2002 by chromatic E<lt>chromatic@wgz.orgE<gt>,
- Michael G Schwern E<lt>schwern@pobox.comE<gt>.
-
-This program is free software; you can redistribute it and/or
-modify it under the same terms as Perl itself.
-
-See F<http://www.perl.com/perl/misc/Artistic.html>
-
-=cut
-
-1;
diff --git a/Perl/t/Test/Harness.pm b/Perl/t/Test/Harness.pm
deleted file mode 100644
index 0897455..0000000
--- a/Perl/t/Test/Harness.pm
+++ /dev/null
@@ -1,1168 +0,0 @@
-# -*- Mode: cperl; cperl-indent-level: 4 -*-
-# $Id$
-
-package Test::Harness;
-
-require 5.004;
-use Test::Harness::Straps;
-use Test::Harness::Assert;
-use Exporter;
-use Benchmark;
-use Config;
-use strict;
-
-use vars qw($VERSION $Verbose $Switches $Have_Devel_Corestack $Curtest
- $Columns $verbose $switches $ML $Strap
- @ISA @EXPORT @EXPORT_OK $Last_ML_Print
- );
-
-# Backwards compatibility for exportable variable names.
-*verbose = *Verbose;
-*switches = *Switches;
-
-$Have_Devel_Corestack = 0;
-
-$VERSION = '2.30';
-
-$ENV{HARNESS_ACTIVE} = 1;
-
-END {
- # For VMS.
- delete $ENV{HARNESS_ACTIVE};
-}
-
-# Some experimental versions of OS/2 build have broken $?
-my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};
-
-my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
-
-my $Ok_Slow = $ENV{HARNESS_OK_SLOW};
-
-$Strap = Test::Harness::Straps->new;
-
-@ISA = ('Exporter');
-@EXPORT = qw(&runtests);
-@EXPORT_OK = qw($verbose $switches);
-
-$Verbose = $ENV{HARNESS_VERBOSE} || 0;
-$Switches = "-w";
-$Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
-$Columns--; # Some shells have trouble with a full line of text.
-
-
-=head1 NAME
-
-Test::Harness - run perl standard test scripts with statistics
-
-=head1 SYNOPSIS
-
- use Test::Harness;
-
- runtests(@test_files);
-
-=head1 DESCRIPTION
-
-B<STOP!> If all you want to do is write a test script, consider using
-Test::Simple. Otherwise, read on.
-
-(By using the Test module, you can write test scripts without
-knowing the exact output this module expects. However, if you need to
-know the specifics, read on!)
-
-Perl test scripts print to standard output C<"ok N"> for each single
-test, where C<N> is an increasing sequence of integers. The first line
-output by a standard test script is C<"1..M"> with C<M> being the
-number of tests that should be run within the test
-script. Test::Harness::runtests(@tests) runs all the test scripts
-named as arguments and checks standard output for the expected
-C<"ok N"> strings.
-
-After all tests have been performed, runtests() prints some
-performance statistics that are computed by the Benchmark module.
-
-=head2 The test script output
-
-The following explains how Test::Harness interprets the output of your
-test program.
-
-=over 4
-
-=item B<'1..M'>
-
-This header tells how many tests there will be. For example, C<1..10>
-means you plan on running 10 tests. This is a safeguard in case your
-test dies quietly in the middle of its run.
-
-It should be the first non-comment line output by your test program.
-
-In certain instances, you may not know how many tests you will
-ultimately be running. In this case, it is permitted for the 1..M
-header to appear as the B<last> line output by your test (again, it
-can be followed by further comments).
-
-Under B<no> circumstances should 1..M appear in the middle of your
-output or more than once.
-
-
-=item B<'ok', 'not ok'. Ok?>
-
-Any output from the test script to standard error is ignored and
-bypassed, and thus will be seen by the user. Lines written to standard
-output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
-runtests(). All other lines are discarded.
-
-C</^not ok/> indicates a failed test. C</^ok/> is a successful test.
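-
-So the smallest conceivable passing test script is simply:
-
-    print "1..1\n";
-    print "ok 1\n";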
-
-
-=item B<test numbers>
-
-Perl normally expects the 'ok' or 'not ok' to be followed by a test
-number. It is tolerated if the test numbers after 'ok' are
-omitted. In this case Test::Harness temporarily maintains its own
-counter until the script supplies test numbers again. So the following
-test script
-
- print <<END;
- 1..6
- not ok
- ok
- not ok
- ok
- ok
- END
-
-will generate
-
- FAILED tests 1, 3, 6
- Failed 3/6 tests, 50.00% okay
-
-=item B<test names>
-
-Anything after the test number but before the # is considered to be
-the name of the test.
-
- ok 42 this is the name of the test
-
-Currently, Test::Harness does nothing with this information.
-
-=item B<Skipping tests>
-
-If the standard output line contains the substring C< # Skip> (with
-variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
-counted as a skipped test. If the whole test script succeeds, the
-count of skipped tests is included in the generated output.
-C<Test::Harness> reports the text after C< # Skip\S*\s+> as a reason
-for skipping.
-
- ok 23 # skip Insufficient flogiston pressure.
-
-Similarly, one can include an explanation in a C<1..0> line
-emitted if the test script is skipped completely:
-
- 1..0 # Skipped: no leverage found
-
-=item B<Todo tests>
-
-If the standard output line contains the substring C< # TODO> after
-C<not ok> or C<not ok NUMBER>, it is counted as a todo test. The text
-afterwards is the thing that has to be done before this test will
-succeed.
-
- not ok 13 # TODO harness the power of the atom
-
-=begin _deprecated
-
-Alternatively, you can specify a list of what tests are todo as part
-of the test header.
-
- 1..23 todo 5 12 23
-
-This only works if the header appears at the beginning of the test.
-
-This style is B<deprecated>.
-
-=end _deprecated
-
-These tests represent a feature to be implemented or a bug to be fixed
-and act as something of an executable "thing to do" list. They are
-B<not> expected to succeed. Should a todo test begin succeeding,
-Test::Harness will report it as a bonus. This indicates that whatever
-you were supposed to do has been done and you should promote this to a
-normal test.
-
-=item B<Bail out!>
-
-As an emergency measure, a test script can decide that further tests
-are useless (e.g. missing dependencies) and testing should stop
-immediately. In that case the test script prints the magic words
-
- Bail out!
-
-to standard output. Any message after these words will be displayed by
-C<Test::Harness> as the reason why testing is stopped.
-
-=item B<Comments>
-
-Additional comments may be put into the testing output on their own
-lines. Comment lines should begin with a '#'; Test::Harness will
-ignore them.
-
- ok 1
- # Life is good, the sun is shining, RAM is cheap.
- not ok 2
- # got 'Bush' expected 'Gore'
-
-=item B<Anything else>
-
-Any other output Test::Harness sees will be silently ignored B<BUT WE
-PLAN TO CHANGE THIS!> If you wish to place additional output in your
-test script, please use a comment.
-
-=back
-
-
-=head2 Taint mode
-
-Test::Harness will honor the C<-T> in the #! line on your test files. So
-if you begin a test with:
-
- #!perl -T
-
-the test will be run with taint mode on.
-
-
-=head2 Configuration variables
-
-These variables can be used to configure the behavior of
-Test::Harness. They are exported on request.
-
-=over 4
-
-=item B<$Test::Harness::verbose>
-
-The global variable $Test::Harness::verbose is exportable and can be
-used to let runtests() display the standard output of the script
-without altering the behavior otherwise.
-
-=item B<$Test::Harness::switches>
-
-The global variable $Test::Harness::switches is exportable and can be
-used to set perl command line options used for running the test
-script(s). The default value is C<-w>.
-
-=back
-
-
-=head2 Failure
-
-It will happen: your tests will fail. After you mop up your ego, you
-can begin examining the summary report:
-
- t/base..............ok
- t/nonumbers.........ok
- t/ok................ok
- t/test-harness......ok
- t/waterloo..........dubious
- Test returned status 3 (wstat 768, 0x300)
- DIED. FAILED tests 1, 3, 5, 7, 9, 11, 13, 15, 17, 19
- Failed 10/20 tests, 50.00% okay
- Failed Test Stat Wstat Total Fail Failed List of Failed
- -----------------------------------------------------------------------
- t/waterloo.t 3 768 20 10 50.00% 1 3 5 7 9 11 13 15 17 19
- Failed 1/5 test scripts, 80.00% okay. 10/44 subtests failed, 77.27% okay.
-
-Everything passed but t/waterloo.t. It failed 10 of 20 tests and
-exited with non-zero status indicating something dubious happened.
-
-The columns in the summary report mean:
-
-=over 4
-
-=item B<Failed Test>
-
-The test file which failed.
-
-=item B<Stat>
-
-If the test exited with non-zero, this is its exit status.
-
-=item B<Wstat>
-
-The wait status of the test I<umm, I need a better explanation here>.
-
-=item B<Total>
-
-Total number of tests expected to run.
-
-=item B<Fail>
-
-Number which failed, either from "not ok" or because they never ran.
-
-=item B<Failed>
-
-Percentage of the total tests which failed.
-
-=item B<List of Failed>
-
-A list of the tests which failed. Successive failures may be
-abbreviated (ie. 15-20 to indicate that tests 15, 16, 17, 18, 19 and
-20 failed).
-
-=back
-
-
-=head2 Functions
-
-Test::Harness currently only has one function, here it is.
-
-=over 4
-
-=item B<runtests>
-
- my $allok = runtests(@test_files);
-
-This runs all the given @test_files and divines whether they passed
-or failed based on their output to STDOUT (details above). It prints
-out each individual test which failed along with a summary report and
-how long it all took.
-
-It returns true if everything was ok. Otherwise it will die() with
-one of the messages in the DIAGNOSTICS section.
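-
-Because it die()s on failure, a driver script might guard the call (a
-sketch):
-
-    eval { runtests(@test_files) };
-    print STDERR $@ if $@;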
-
-=for _private
-
-This is just _run_all_tests() plus _show_results()
-
-=cut
-
-sub runtests {
- my(@tests) = @_;
-
- local ($\, $,);
-
- my($tot, $failedtests) = _run_all_tests(@tests);
- _show_results($tot, $failedtests);
-
- my $ok = _all_ok($tot);
-
- assert(($ok xor keys %$failedtests),
- q{ok status jives with $failedtests});
-
- return $ok;
-}
-
-=begin _private
-
-=item B<_all_ok>
-
- my $ok = _all_ok(\%tot);
-
-Tells you if this test run is overall successful or not.
-
-=cut
-
-sub _all_ok {
- my($tot) = shift;
-
- return $tot->{bad} == 0 && ($tot->{max} || $tot->{skipped}) ? 1 : 0;
-}
-
-=item B<_globdir>
-
- my @files = _globdir $dir;
-
-Returns all the files in a directory. This is shorthand for backwards
-compatibility on systems where glob() doesn't work right.
-
-=cut
-
-sub _globdir {
- opendir DIRH, shift;
- my @f = readdir DIRH;
- closedir DIRH;
-
- return @f;
-}
-
-=item B<_run_all_tests>
-
- my($total, $failed) = _run_all_tests(@test_files);
-
-Runs all the given @test_files (as runtests()) but does it quietly (no
-report). $total is a hash ref summary of all the tests run. Its keys
-and values are this:
-
- bonus Number of individual todo tests unexpectedly passed
- max Number of individual tests run
- ok Number of individual tests passed
- sub_skipped Number of individual tests skipped
- todo Number of individual todo tests
-
- files Number of test files run
- good Number of test files passed
- bad Number of test files failed
- tests Number of test files originally given
- skipped Number of test files skipped
-
-If $total->{bad} == 0 and $total->{max} > 0, you've got a successful
-test.
-
-$failed is a hash ref of all the test scripts which failed. Each key
-is the name of a test script, each value is another hash representing
-how that script failed. Its keys are these:
-
- name Name of the test which failed
- estat Script's exit value
- wstat Script's wait status
- max Number of individual tests
- failed Number which failed
- percent Percentage of tests which failed
- canon List of tests which failed (as string).
-
-Needless to say, $failed should be empty if everything passed.
-
-B<NOTE> Currently this function is still noisy. I'm working on it.
-
-=cut
-
-#'#
-sub _run_all_tests {
- my(@tests) = @_;
- local($|) = 1;
- my(%failedtests);
-
- # Test-wide totals.
- my(%tot) = (
- bonus => 0,
- max => 0,
- ok => 0,
- files => 0,
- bad => 0,
- good => 0,
- tests => scalar @tests,
- sub_skipped => 0,
- todo => 0,
- skipped => 0,
- bench => 0,
- );
-
- my @dir_files = _globdir $Files_In_Dir if defined $Files_In_Dir;
- my $t_start = new Benchmark;
-
- my $width = _leader_width(@tests);
- foreach my $tfile (@tests) {
- $Last_ML_Print = 0; # so each test prints at least once
- my($leader, $ml) = _mk_leader($tfile, $width);
- local $ML = $ml;
- print $leader;
-
- $tot{files}++;
-
- $Strap->{_seen_header} = 0;
- my %results = $Strap->analyze_file($tfile) or
- do { warn "$Strap->{error}\n"; next };
-
- # state of the current test.
- my @failed = grep { !$results{details}[$_-1]{ok} }
- 1..@{$results{details}};
- my %test = (
- ok => $results{ok},
- 'next' => $Strap->{'next'},
- max => $results{max},
- failed => \@failed,
- bonus => $results{bonus},
- skipped => $results{skip},
- skip_reason => $results{skip_reason},
- skip_all => $Strap->{skip_all},
- ml => $ml,
- );
-
- $tot{bonus} += $results{bonus};
- $tot{max} += $results{max};
- $tot{ok} += $results{ok};
- $tot{todo} += $results{todo};
- $tot{sub_skipped} += $results{skip};
-
- my($estatus, $wstatus) = @results{qw(exit wait)};
-
- if ($results{passing}) {
- if ($test{max} and $test{skipped} + $test{bonus}) {
- my @msg;
- push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
- if $test{skipped};
- push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
- if $test{bonus};
- print "$test{ml}ok\n ".join(', ', @msg)."\n";
- } elsif ($test{max}) {
- print "$test{ml}ok\n";
- } elsif (defined $test{skip_all} and length $test{skip_all}) {
- print "skipped\n all skipped: $test{skip_all}\n";
- $tot{skipped}++;
- } else {
- print "skipped\n all skipped: no reason given\n";
- $tot{skipped}++;
- }
- $tot{good}++;
- }
- else {
- # List unrun tests as failures.
- if ($test{'next'} <= $test{max}) {
- push @{$test{failed}}, $test{'next'}..$test{max};
- }
- # List overruns as failures.
- else {
- my $details = $results{details};
- foreach my $overrun ($test{max}+1..@$details)
- {
- next unless ref $details->[$overrun-1];
- push @{$test{failed}}, $overrun
- }
- }
-
- if ($wstatus) {
- $failedtests{$tfile} = _dubious_return(\%test, \%tot,
- $estatus, $wstatus);
- $failedtests{$tfile}{name} = $tfile;
- }
- elsif($results{seen}) {
- if (@{$test{failed}} and $test{max}) {
- my ($txt, $canon) = canonfailed($test{max},$test{skipped},
- @{$test{failed}});
- print "$test{ml}$txt";
- $failedtests{$tfile} = { canon => $canon,
- max => $test{max},
- failed => scalar @{$test{failed}},
- name => $tfile,
- percent => 100*(scalar @{$test{failed}})/$test{max},
- estat => '',
- wstat => '',
- };
- } else {
- print "Don't know which tests failed: got $test{ok} ok, ".
- "expected $test{max}\n";
- $failedtests{$tfile} = { canon => '??',
- max => $test{max},
- failed => '??',
- name => $tfile,
- percent => undef,
- estat => '',
- wstat => '',
- };
- }
- $tot{bad}++;
- } else {
- print "FAILED before any test output arrived\n";
- $tot{bad}++;
- $failedtests{$tfile} = { canon => '??',
- max => '??',
- failed => '??',
- name => $tfile,
- percent => undef,
- estat => '',
- wstat => '',
- };
- }
- }
-
- if (defined $Files_In_Dir) {
- my @new_dir_files = _globdir $Files_In_Dir;
- if (@new_dir_files != @dir_files) {
- my %f;
- @f{@new_dir_files} = (1) x @new_dir_files;
- delete @f{@dir_files};
- my @f = sort keys %f;
- print "LEAKED FILES: @f\n";
- @dir_files = @new_dir_files;
- }
- }
- }
- $tot{bench} = timediff(new Benchmark, $t_start);
-
- $Strap->_restore_PERL5LIB;
-
- return(\%tot, \%failedtests);
-}
-
-=item B<_mk_leader>
-
- my($leader, $ml) = _mk_leader($test_file, $width);
-
-Generates the 't/foo........' $leader for the given $test_file as well
-as a similar version which will overwrite the current line (by use of
-\r and such). $ml may be empty if Test::Harness doesn't think you're
-on TTY.
-
-The $width is the width of the "yada/blah.." string.
-
-=cut
-
-sub _mk_leader {
- my($te, $width) = @_;
- chomp($te);
- $te =~ s/\.\w+$/./;
-
- if ($^O eq 'VMS') { $te =~ s/^.*\.t\./\[.t./s; }
- my $blank = (' ' x 77);
- my $leader = "$te" . '.' x ($width - length($te));
- my $ml = "";
-
- $ml = "\r$blank\r$leader"
- if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose;
-
- return($leader, $ml);
-}
-
-=item B<_leader_width>
-
- my($width) = _leader_width(@test_files);
-
-Calculates how wide the leader should be based on the length of the
-longest test name.
-
-=cut
-
-sub _leader_width {
- my $maxlen = 0;
- my $maxsuflen = 0;
- foreach (@_) {
- my $suf = /\.(\w+)$/ ? $1 : '';
- my $len = length;
- my $suflen = length $suf;
- $maxlen = $len if $len > $maxlen;
- $maxsuflen = $suflen if $suflen > $maxsuflen;
- }
- # + 3 : we want three dots between the test name and the "ok"
- return $maxlen + 3 - $maxsuflen;
-}
-
-
-sub _show_results {
- my($tot, $failedtests) = @_;
-
- my $pct;
- my $bonusmsg = _bonusmsg($tot);
-
- if (_all_ok($tot)) {
- print "All tests successful$bonusmsg.\n";
- } elsif (!$tot->{tests}){
- die "FAILED--no tests were run for some reason.\n";
- } elsif (!$tot->{max}) {
- my $blurb = $tot->{tests}==1 ? "script" : "scripts";
- die "FAILED--$tot->{tests} test $blurb could be run, ".
- "alas--no output ever seen\n";
- } else {
- $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
- my $percent_ok = 100*$tot->{ok}/$tot->{max};
- my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
- $tot->{max} - $tot->{ok}, $tot->{max},
- $percent_ok;
-
- my($fmt_top, $fmt) = _create_fmts($failedtests);
-
- # Now write to formats
- for my $script (sort keys %$failedtests) {
- $Curtest = $failedtests->{$script};
- write;
- }
- if ($tot->{bad}) {
- $bonusmsg =~ s/^,\s*//;
- print "$bonusmsg.\n" if $bonusmsg;
- die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
- "$subpct\n";
- }
- }
-
- printf("Files=%d, Tests=%d, %s\n",
- $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
-}
-
-
-my %Handlers = ();
-$Strap->{callback} = sub {
- my($self, $line, $type, $totals) = @_;
- print $line if $Verbose;
-
- my $meth = $Handlers{$type};
- $meth->($self, $line, $type, $totals) if $meth;
-};
-
-
-$Handlers{header} = sub {
- my($self, $line, $type, $totals) = @_;
-
- warn "Test header seen more than once!\n" if $self->{_seen_header};
-
- $self->{_seen_header}++;
-
- warn "1..M can only appear at the beginning or end of tests\n"
- if $totals->{seen} &&
- $totals->{max} < $totals->{seen};
-};
-
-$Handlers{test} = sub {
- my($self, $line, $type, $totals) = @_;
-
- my $curr = $totals->{seen};
- my $next = $self->{'next'};
- my $max = $totals->{max};
- my $detail = $totals->{details}[-1];
-
- if( $detail->{ok} ) {
- _print_ml_less("ok $curr/$max");
-
- if( $detail->{type} eq 'skip' ) {
- $totals->{skip_reason} = $detail->{reason}
- unless defined $totals->{skip_reason};
- $totals->{skip_reason} = 'various reasons'
- if $totals->{skip_reason} ne $detail->{reason};
- }
- }
- else {
- _print_ml("NOK $curr");
- }
-
- if( $curr > $next ) {
- print "Test output counter mismatch [test $curr]\n";
- }
- elsif( $curr < $next ) {
- print "Confused test output: test $curr answered after ".
- "test ", $next - 1, "\n";
- }
-
-};
-
-$Handlers{bailout} = sub {
- my($self, $line, $type, $totals) = @_;
-
- die "FAILED--Further testing stopped" .
- ($self->{bailout_reason} ? ": $self->{bailout_reason}\n" : ".\n");
-};
-
-
-sub _print_ml {
- print join '', $ML, @_ if $ML;
-}
-
-
-# For slow connections, we save lots of bandwidth by printing only once
-# per second.
-sub _print_ml_less {
- if( !$Ok_Slow || $Last_ML_Print != time ) {
- _print_ml(@_);
- $Last_ML_Print = time;
- }
-}
-
-sub _bonusmsg {
- my($tot) = @_;
-
- my $bonusmsg = '';
- $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
- " UNEXPECTEDLY SUCCEEDED)")
- if $tot->{bonus};
-
- if ($tot->{skipped}) {
- $bonusmsg .= ", $tot->{skipped} test"
- . ($tot->{skipped} != 1 ? 's' : '');
- if ($tot->{sub_skipped}) {
- $bonusmsg .= " and $tot->{sub_skipped} subtest"
- . ($tot->{sub_skipped} != 1 ? 's' : '');
- }
- $bonusmsg .= ' skipped';
- }
- elsif ($tot->{sub_skipped}) {
- $bonusmsg .= ", $tot->{sub_skipped} subtest"
- . ($tot->{sub_skipped} != 1 ? 's' : '')
- . " skipped";
- }
-
- return $bonusmsg;
-}
-
-# Test program go boom.
-sub _dubious_return {
- my($test, $tot, $estatus, $wstatus) = @_;
- my ($failed, $canon, $percent) = ('??', '??');
-
- printf "$test->{ml}dubious\n\tTest returned status $estatus ".
- "(wstat %d, 0x%x)\n",
- $wstatus,$wstatus;
- print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
-
- if (corestatus($wstatus)) { # until we have a wait module
- if ($Have_Devel_Corestack) {
- Devel::CoreStack::stack($^X);
- } else {
- print "\ttest program seems to have generated a core\n";
- }
- }
-
- $tot->{bad}++;
-
- if ($test->{max}) {
- if ($test->{'next'} == $test->{max} + 1 and not @{$test->{failed}}) {
- print "\tafter all the subtests completed successfully\n";
- $percent = 0;
- $failed = 0; # But we do not set $canon!
- }
- else {
- push @{$test->{failed}}, $test->{'next'}..$test->{max};
- $failed = @{$test->{failed}};
- (my $txt, $canon) = canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
- $percent = 100*(scalar @{$test->{failed}})/$test->{max};
- print "DIED. ",$txt;
- }
- }
-
- return { canon => $canon, max => $test->{max} || '??',
- failed => $failed,
- percent => $percent,
- estat => $estatus, wstat => $wstatus,
- };
-}
-
-
-sub _create_fmts {
- my($failedtests) = @_;
-
- my $failed_str = "Failed Test";
- my $middle_str = " Stat Wstat Total Fail Failed ";
- my $list_str = "List of Failed";
-
- # Figure out our longest name string for formatting purposes.
- my $max_namelen = length($failed_str);
- foreach my $script (keys %$failedtests) {
- my $namelen = length $failedtests->{$script}->{name};
- $max_namelen = $namelen if $namelen > $max_namelen;
- }
-
- my $list_len = $Columns - length($middle_str) - $max_namelen;
- if ($list_len < length($list_str)) {
- $list_len = length($list_str);
- $max_namelen = $Columns - length($middle_str) - $list_len;
- if ($max_namelen < length($failed_str)) {
- $max_namelen = length($failed_str);
- $Columns = $max_namelen + length($middle_str) + $list_len;
- }
- }
-
- my $fmt_top = "format STDOUT_TOP =\n"
- . sprintf("%-${max_namelen}s", $failed_str)
- . $middle_str
- . $list_str . "\n"
- . "-" x $Columns
- . "\n.\n";
-
- my $fmt = "format STDOUT =\n"
- . "@" . "<" x ($max_namelen - 1)
- . " @>> @>>>> @>>>> @>>> ^##.##% "
- . "^" . "<" x ($list_len - 1) . "\n"
- . '{ $Curtest->{name}, $Curtest->{estat},'
- . ' $Curtest->{wstat}, $Curtest->{max},'
- . ' $Curtest->{failed}, $Curtest->{percent},'
- . ' $Curtest->{canon}'
- . "\n}\n"
- . "~~" . " " x ($Columns - $list_len - 2) . "^"
- . "<" x ($list_len - 1) . "\n"
- . '$Curtest->{canon}'
- . "\n.\n";
-
- eval $fmt_top;
- die $@ if $@;
- eval $fmt;
- die $@ if $@;
-
- return($fmt_top, $fmt);
-}
-
-{
- my $tried_devel_corestack;
-
- sub corestatus {
- my($st) = @_;
-
- my $did_core;
- eval { # we may not have a WCOREDUMP
- local $^W = 0; # *.ph files are often *very* noisy
- require 'wait.ph';
- $did_core = WCOREDUMP($st);
- };
- if( $@ ) {
- $did_core = $st & 0200;
- }
-
- eval { require Devel::CoreStack; $Have_Devel_Corestack++ }
- unless $tried_devel_corestack++;
-
- return $did_core;
- }
-}
-
-sub canonfailed ($$@) {
- my($max,$skipped,@failed) = @_;
- my %seen;
- @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
- my $failed = @failed;
- my @result = ();
- my @canon = ();
- my $min;
- my $last = $min = shift @failed;
- my $canon;
- if (@failed) {
- for (@failed, $failed[-1]) { # don't forget the last one
- if ($_ > $last+1 || $_ == $last) {
- if ($min == $last) {
- push @canon, $last;
- } else {
- push @canon, "$min-$last";
- }
- $min = $_;
- }
- $last = $_;
- }
- local $" = ", ";
- push @result, "FAILED tests @canon\n";
- $canon = join ' ', @canon;
- } else {
- push @result, "FAILED test $last\n";
- $canon = $last;
- }
-
- push @result, "\tFailed $failed/$max tests, ";
- if ($max) {
- push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
- } else {
- push @result, "?% okay";
- }
- my $ender = 's' x ($skipped > 1);
- my $good = $max - $failed - $skipped;
- if ($skipped) {
- my $skipmsg = " (less $skipped skipped test$ender: $good okay, ";
- if ($max) {
- my $goodper = sprintf("%.2f",100*($good/$max));
- $skipmsg .= "$goodper%)";
- } else {
- $skipmsg .= "?%)";
- }
- push @result, $skipmsg;
- }
- push @result, "\n";
- my $txt = join "", @result;
- ($txt, $canon);
-}
-
-=end _private
-
-=back
-
-=cut
-
-
-1;
-__END__
-
-
-=head1 EXPORT
-
-C<&runtests> is exported by Test::Harness by default.
-
-C<$verbose> and C<$switches> are exported upon request.
-
-
-=head1 DIAGNOSTICS
-
-=over 4
-
-=item C<All tests successful.\nFiles=%d, Tests=%d, %s>
-
-If all tests are successful some statistics about the performance are
-printed.
-
-=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
-
-For any single script that has failing subtests statistics like the
-above are printed.
-
-=item C<Test returned status %d (wstat %d)>
-
-Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
-and C<$?> are printed in a message similar to the above.
-
-=item C<Failed 1 test, %.2f%% okay. %s>
-
-=item C<Failed %d/%d tests, %.2f%% okay. %s>
-
-If not all tests were successful, the script dies with one of the
-above messages.
-
-=item C<FAILED--Further testing stopped: %s>
-
-If a single subtest decides that further testing will not make sense,
-the script dies with this message.
-
-=back
-
-=head1 ENVIRONMENT
-
-=over 4
-
-=item C<HARNESS_ACTIVE>
-
-Harness sets this before executing the individual tests. This allows
-the tests to determine if they are being executed through the harness
-or by any other means.
-
-=item C<HARNESS_COLUMNS>
-
-This value will be used for the width of the terminal. If it is not
-set then it will default to C<COLUMNS>. If this is not set, it will
-default to 80. Note that users of Bourne-sh based shells will need to
-C<export COLUMNS> for this module to use that variable.
-
-=item C<HARNESS_COMPILE_TEST>
-
-When true it will make harness attempt to compile the test using
-C<perlcc> before running it.
-
-B<NOTE> This currently only works when sitting in the perl source
-directory!
-
-=item C<HARNESS_FILELEAK_IN_DIR>
-
-When set to the name of a directory, harness will check after each
-test whether new files appeared in that directory, and report them as
-
- LEAKED FILES: scr.tmp 0 my.db
-
-If relative, directory name is with respect to the current directory at
-the moment runtests() was called. Putting absolute path into
-C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
-
-=item C<HARNESS_IGNORE_EXITCODE>
-
-Makes harness ignore the exit status of child processes when defined.
-
-=item C<HARNESS_NOTTY>
-
-When set to a true value, forces it to behave as though STDOUT were
-not a console. You may need to set this if you don't want harness to
-output more frequent progress messages using carriage returns. Some
-consoles may not handle carriage returns properly (which results in a
-somewhat messy output).
-
-=item C<HARNESS_OK_SLOW>
-
-If true, the C<ok> messages are printed out only every second.
-This reduces output and therefore may for example help testing
-over slow connections.
-
-=item C<HARNESS_PERL_SWITCHES>
-
-Its value will be prepended to the switches used to invoke perl on
-each test. For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will
-run all tests with all warnings enabled.
-
-=item C<HARNESS_VERBOSE>
-
-If true, Test::Harness will output the verbose results of running
-its tests. Setting $Test::Harness::verbose will override this.
-
-=back
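-
-For instance, here is a minimal sketch of setting a couple of these
-variables from a small wrapper script rather than the shell (the test
-file names are hypothetical):
-
-  BEGIN {
-      # HARNESS_VERBOSE is read when Test::Harness is loaded,
-      # hence the BEGIN block.
-      $ENV{HARNESS_VERBOSE}       = 1;
-
-      # Prepended to the switches of each perl that runs a test.
-      $ENV{HARNESS_PERL_SWITCHES} = '-w';
-  }
-  use Test::Harness qw(&runtests);
-
-  runtests('t/basic.t', 't/more.t');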
-
-=head1 EXAMPLE
-
-Here's how Test::Harness tests itself:
-
- $ cd ~/src/devel/Test-Harness
- $ perl -Mblib -e 'use Test::Harness qw(&runtests $verbose);
- $verbose=0; runtests @ARGV;' t/*.t
- Using /home/schwern/src/devel/Test-Harness/blib
- t/base..............ok
- t/nonumbers.........ok
- t/ok................ok
- t/test-harness......ok
- All tests successful.
- Files=4, Tests=24, 2 wallclock secs ( 0.61 cusr + 0.41 csys = 1.02 CPU)
-
-=head1 SEE ALSO
-
-L<Test> and L<Test::Simple> for writing test scripts, L<Benchmark> for
-the underlying timing routines, L<Devel::CoreStack> to generate core
-dumps from failed tests and L<Devel::Cover> for test coverage
-analysis.
-
-=head1 AUTHORS
-
-Either Tim Bunce or Andreas Koenig, we don't know. What we know for
-sure is that it was inspired by Larry Wall's TEST script that came
-with perl distributions for ages. Numerous anonymous contributors
-exist. Andreas Koenig held the torch for many years, and then
-Michael G Schwern.
-
-Current maintainer is Andy Lester C<< <andy@petdance.com> >>.
-
-=head1 LICENSE
-
-This program is free software; you can redistribute it and/or
-modify it under the same terms as Perl itself.
-
-See L<http://www.perl.com/perl/misc/Artistic.html>
-
-=head1 TODO
-
-Provide a way of running tests quietly (i.e. no printing) for automated
-validation of tests. This will probably take the form of a version
-of runtests() which rather than printing its output returns raw data
-on the state of the tests. (Partially done in Test::Harness::Straps)
-
-Document the format.
-
-Fix HARNESS_COMPILE_TEST without breaking its core usage.
-
-Figure a way to report test names in the failure summary.
-
-Rework the test summary so long test names are not truncated as badly.
-(Partially done with new skip test styles)
-
-Deal with VMS's "not \nok 4\n" mistake.
-
-Add option for coverage analysis.
-
-Trap STDERR.
-
-Implement Straps total_results()
-
-Remember exit code
-
-Completely redo the print summary code.
-
-Implement Straps callbacks. (experimentally implemented)
-
-Straps->analyze_file() not taint clean, don't know if it can be
-
-Fix that damned VMS nit.
-
-HARNESS_TODOFAIL to display TODO failures
-
-Add a test for verbose.
-
-Change internal list of test results to a hash.
-
-Fix stats display when there's an overrun.
-
-Fix so perls with spaces in the filename work.
-
-=for _private
-
-Keep whittling away at _run_all_tests()
-
-=for _private
-
-Clean up how the summary is printed. Get rid of those damned formats.
-
-=head1 BUGS
-
-HARNESS_COMPILE_TEST currently assumes it's run from the Perl source
-directory.
-
-=cut
diff --git a/Perl/t/Test/Harness/Assert.pm b/Perl/t/Test/Harness/Assert.pm
deleted file mode 100644
index 3ee23e3..0000000
--- a/Perl/t/Test/Harness/Assert.pm
+++ /dev/null
@@ -1,68 +0,0 @@
-# $Id$
-
-package Test::Harness::Assert;
-
-use strict;
-require Exporter;
-use vars qw($VERSION @EXPORT @ISA);
-
-$VERSION = '0.01';
-
-@ISA = qw(Exporter);
-@EXPORT = qw(assert);
-
-
-=head1 NAME
-
-Test::Harness::Assert - simple assert
-
-=head1 SYNOPSIS
-
- ### FOR INTERNAL USE ONLY ###
-
- use Test::Harness::Assert;
-
- assert( EXPR, $name );
-
-=head1 DESCRIPTION
-
-A simple assert routine since we don't have Carp::Assert handy.
-
-B<For internal use by Test::Harness ONLY!>
-
-=head2 Functions
-
-=over 4
-
-=item B<assert>
-
- assert( EXPR, $name );
-
-If the expression is false the program aborts.
-
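-For example (the conditions and variable names are illustrative):
-
-  assert( defined $fh, 'filehandle is defined' );
-  assert( @tests > 0,  'found some tests' );
-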
-=cut
-
-sub assert ($;$) {
- my($assert, $name) = @_;
-
- unless( $assert ) {
- require Carp;
- my $msg = 'Assert failed';
- $msg .= " - '$name'" if defined $name;
- $msg .= '!';
- Carp::croak($msg);
- }
-
-}
-
-=head1 AUTHOR
-
-Michael G Schwern E<lt>schwern@pobox.comE<gt>
-
-=head1 SEE ALSO
-
-L<Carp::Assert>
-
-=cut
-
-1;
diff --git a/Perl/t/Test/Harness/Iterator.pm b/Perl/t/Test/Harness/Iterator.pm
deleted file mode 100644
index 5e22793..0000000
--- a/Perl/t/Test/Harness/Iterator.pm
+++ /dev/null
@@ -1,61 +0,0 @@
-package Test::Harness::Iterator;
-
-use strict;
-use vars qw($VERSION);
-$VERSION = 0.01;
-
-
-=head1 NAME
-
-Test::Harness::Iterator - Internal Test::Harness Iterator
-
-=head1 SYNOPSIS
-
-  use Test::Harness::Iterator;
- my $it = Test::Harness::Iterator->new(\*TEST);
- my $it = Test::Harness::Iterator->new(\@array);
-
- my $line = $it->next;
-
-
-=head1 DESCRIPTION
-
-B<FOR INTERNAL USE ONLY!>
-
-This is a simple iterator wrapper for arrays and filehandles.
-
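-A typical consumption loop, as a sketch:
-
-  while( defined(my $line = $it->next) ) {
-      # ... parse $line ...
-  }
-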
-=cut
-
-sub new {
- my($proto, $thing) = @_;
-
- my $self = {};
- if( ref $thing eq 'GLOB' ) {
- bless $self, 'Test::Harness::Iterator::FH';
- $self->{fh} = $thing;
- }
- elsif( ref $thing eq 'ARRAY' ) {
- bless $self, 'Test::Harness::Iterator::ARRAY';
- $self->{idx} = 0;
- $self->{array} = $thing;
- }
- else {
- warn "Can't iterate with a ", ref $thing;
- }
-
- return $self;
-}
-
-package Test::Harness::Iterator::FH;
-sub next {
- my $fh = $_[0]->{fh};
- return scalar <$fh>;
-}
-
-
-package Test::Harness::Iterator::ARRAY;
-sub next {
- my $self = shift;
- return $self->{array}->[$self->{idx}++];
-}
diff --git a/Perl/t/Test/Harness/Straps.pm b/Perl/t/Test/Harness/Straps.pm
deleted file mode 100644
index 4d971b7..0000000
--- a/Perl/t/Test/Harness/Straps.pm
+++ /dev/null
@@ -1,667 +0,0 @@
-# -*- Mode: cperl; cperl-indent-level: 4 -*-
-# $Id$
-
-package Test::Harness::Straps;
-
-use strict;
-use vars qw($VERSION);
-use Config;
-$VERSION = '0.15';
-
-use Test::Harness::Assert;
-use Test::Harness::Iterator;
-
-# Flags used as return values from our methods. Just for internal
-# clarification.
-my $TRUE = (1==1);
-my $FALSE = !$TRUE;
-my $YES = $TRUE;
-my $NO = $FALSE;
-
-
-=head1 NAME
-
-Test::Harness::Straps - detailed analysis of test results
-
-=head1 SYNOPSIS
-
- use Test::Harness::Straps;
-
- my $strap = Test::Harness::Straps->new;
-
- # Various ways to interpret a test
- my %results = $strap->analyze($name, \@test_output);
- my %results = $strap->analyze_fh($name, $test_filehandle);
- my %results = $strap->analyze_file($test_file);
-
- # UNIMPLEMENTED
- my %total = $strap->total_results;
-
- # Altering the behavior of the strap UNIMPLEMENTED
- my $verbose_output = $strap->dump_verbose();
- $strap->dump_verbose_fh($output_filehandle);
-
-
-=head1 DESCRIPTION
-
-B<THIS IS ALPHA SOFTWARE> in that the interface is subject to change
-in incompatible ways. It is otherwise stable.
-
-Test::Harness is limited to printing out its results. This makes
-analysis of the test results difficult for anything but a human. To
-make it easier for programs to work with test results, we provide
-Test::Harness::Straps. Instead of printing the results, straps
-provide them as raw data. You can also configure how the tests are to
-be run.
-
-The interface is currently incomplete. I<Please> contact the author
-if you'd like a feature added, something changed, or if you just have
-comments.
-
-=head1 Construction
-
-=head2 C<new>
-
- my $strap = Test::Harness::Straps->new;
-
-Initialize a new strap.
-
-=cut
-
-sub new {
- my($proto) = shift;
- my($class) = ref $proto || $proto;
-
- my $self = bless {}, $class;
- $self->_init;
-
- return $self;
-}
-
-=head2 C<_init>
-
- $strap->_init;
-
-Initialize the internal state of a strap to make it ready for parsing.
-
-=cut
-
-sub _init {
- my($self) = shift;
-
- $self->{_is_vms} = $^O eq 'VMS';
-    $self->{_is_win32} = $^O =~ /^(MS)?Win32$/;
-}
-
-=head1 Analysis
-
-=head2 C<analyze>
-
- my %results = $strap->analyze($name, \@test_output);
-
-Analyzes the output of a single test, assigning it the given C<$name>
-for use in the total report. Returns the C<%results> of the test.
-See L<Results>.
-
-C<@test_output> should be the raw output from the test, including
-newlines.
-
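-For instance, a sketch with canned output (the test name and output
-lines are illustrative):
-
-  my @output  = map "$_\n", '1..2', 'ok 1 - first', 'not ok 2 - second';
-  my %results = $strap->analyze('demo', \@output);
-  # $results{max} == 2, $results{ok} == 1, $results{passing} == 0
-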
-=cut
-
-sub analyze {
- my($self, $name, $test_output) = @_;
-
- my $it = Test::Harness::Iterator->new($test_output);
- return $self->_analyze_iterator($name, $it);
-}
-
-
-sub _analyze_iterator {
- my($self, $name, $it) = @_;
-
- $self->_reset_file_state;
- $self->{file} = $name;
- my %totals = (
- max => 0,
- seen => 0,
-
- ok => 0,
- todo => 0,
- skip => 0,
- bonus => 0,
-
- details => []
- );
-
- # Set them up here so callbacks can have them.
- $self->{totals}{$name} = \%totals;
- while( defined(my $line = $it->next) ) {
- $self->_analyze_line($line, \%totals);
- last if $self->{saw_bailout};
- }
-
- $totals{skip_all} = $self->{skip_all} if defined $self->{skip_all};
-
- my $passed = ($totals{max} == 0 && defined $totals{skip_all}) ||
- ($totals{max} && $totals{seen} &&
- $totals{max} == $totals{seen} &&
- $totals{max} == $totals{ok});
- $totals{passing} = $passed ? 1 : 0;
-
- return %totals;
-}
-
-
-sub _analyze_line {
- my($self, $line, $totals) = @_;
-
- my %result = ();
-
- $self->{line}++;
-
- my $type;
- if( $self->_is_header($line) ) {
- $type = 'header';
-
- $self->{saw_header}++;
-
- $totals->{max} += $self->{max};
- }
- elsif( $self->_is_test($line, \%result) ) {
- $type = 'test';
-
- $totals->{seen}++;
- $result{number} = $self->{'next'} unless $result{number};
-
- # sometimes the 'not ' and the 'ok' are on different lines,
- # happens often on VMS if you do:
- # print "not " unless $test;
- # print "ok $num\n";
- if( $self->{saw_lone_not} &&
- ($self->{lone_not_line} == $self->{line} - 1) )
- {
- $result{ok} = 0;
- }
-
- my $pass = $result{ok};
- $result{type} = 'todo' if $self->{todo}{$result{number}};
-
- if( $result{type} eq 'todo' ) {
- $totals->{todo}++;
- $pass = 1;
- $totals->{bonus}++ if $result{ok}
- }
- elsif( $result{type} eq 'skip' ) {
- $totals->{skip}++;
- $pass = 1;
- }
-
- $totals->{ok}++ if $pass;
-
- if( $result{number} > 100000 && $result{number} > $self->{max} ) {
- warn "Enormous test number seen [test $result{number}]\n";
- warn "Can't detailize, too big.\n";
- }
- else {
- $totals->{details}[$result{number} - 1] =
- {$self->_detailize($pass, \%result)};
- }
-
- # XXX handle counter mismatch
- }
- elsif ( $self->_is_bail_out($line, \$self->{bailout_reason}) ) {
- $type = 'bailout';
- $self->{saw_bailout} = 1;
- }
- else {
- $type = 'other';
- }
-
- $self->{callback}->($self, $line, $type, $totals) if $self->{callback};
-
- $self->{'next'} = $result{number} + 1 if $type eq 'test';
-}
-
-=head2 C<analyze_fh>
-
- my %results = $strap->analyze_fh($name, $test_filehandle);
-
-Like C<analyze>, but it reads from the given filehandle.
-
-=cut
-
-sub analyze_fh {
- my($self, $name, $fh) = @_;
-
- my $it = Test::Harness::Iterator->new($fh);
- $self->_analyze_iterator($name, $it);
-}
-
-=head2 C<analyze_file>
-
- my %results = $strap->analyze_file($test_file);
-
-Like C<analyze>, but it runs the given C<$test_file> and parses its
-results. It will also use that name for the total report.
-
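-A sketch of driving it directly (the file name is hypothetical):
-
-  my %results = $strap->analyze_file('t/basic.t');
-  if( $strap->{error} ) {
-      print "could not analyze: $strap->{error}\n";
-  }
-  else {
-      print "ran $results{seen} of $results{max} tests\n";
-  }
-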
-=cut
-
-sub analyze_file {
- my($self, $file) = @_;
-
- unless( -e $file ) {
- $self->{error} = "$file does not exist";
- return;
- }
-
- unless( -r $file ) {
- $self->{error} = "$file is not readable";
- return;
- }
-
- local $ENV{PERL5LIB} = $self->_INC2PERL5LIB;
-
- my $cmd = $self->{_is_vms} ? "MCR $^X" :
- $self->{_is_win32} ? Win32::GetShortPathName($^X)
- : $^X;
-
- my $switches = $self->_switches($file);
-
- # *sigh* this breaks under taint, but open -| is unportable.
- unless( open(FILE, "$cmd $switches $file|") ) {
- print "can't run $file. $!\n";
- return;
- }
-
- my %results = $self->analyze_fh($file, \*FILE);
- my $exit = close FILE;
- $results{'wait'} = $?;
- if( $? && $self->{_is_vms} ) {
- eval q{use vmsish "status"; $results{'exit'} = $?};
- }
- else {
- $results{'exit'} = _wait2exit($?);
- }
- $results{passing} = 0 unless $? == 0;
-
- $self->_restore_PERL5LIB();
-
- return %results;
-}
-
-
-eval { require POSIX; &POSIX::WEXITSTATUS(0) };
-if( $@ ) {
- *_wait2exit = sub { $_[0] >> 8 };
-}
-else {
- *_wait2exit = sub { POSIX::WEXITSTATUS($_[0]) }
-}
-
-
-=head2 C<_switches>
-
- my $switches = $self->_switches($file);
-
-Formats and returns the switches necessary to run the test.
-
-=cut
-
-sub _switches {
- my($self, $file) = @_;
-
- local *TEST;
- open(TEST, $file) or print "can't open $file. $!\n";
- my $first = <TEST>;
- my $s = $Test::Harness::Switches || '';
- $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
- if exists $ENV{'HARNESS_PERL_SWITCHES'};
-
- if ($first =~ /^#!.*\bperl.*\s-\w*([Tt]+)/) {
- # When taint mode is on, PERL5LIB is ignored. So we need to put
- # all that on the command line as -Is.
- $s .= join " ", qq[ "-$1"], map {qq["-I$_"]} $self->_filtered_INC;
- }
- elsif ($^O eq 'MacOS') {
- # MacPerl's putenv is broken, so it will not see PERL5LIB.
- $s .= join " ", map {qq["-I$_"]} $self->_filtered_INC;
- }
-
- close(TEST) or print "can't close $file. $!\n";
-
- return $s;
-}
-
-
-=head2 C<_INC2PERL5LIB>
-
- local $ENV{PERL5LIB} = $self->_INC2PERL5LIB;
-
-Takes the current value of C<@INC> and turns it into something suitable
-for putting onto C<PERL5LIB>.
-
-=cut
-
-sub _INC2PERL5LIB {
- my($self) = shift;
-
- $self->{_old5lib} = $ENV{PERL5LIB};
-
- return join $Config{path_sep}, $self->_filtered_INC;
-}
-
-=head2 C<_filtered_INC>
-
- my @filtered_inc = $self->_filtered_INC;
-
-Shortens C<@INC> by removing redundant and unnecessary entries.
-Necessary for OSes with limited command line lengths, like VMS.
-
-=cut
-
-sub _filtered_INC {
- my($self, @inc) = @_;
- @inc = @INC unless @inc;
-
- # VMS has a 255-byte limit on the length of %ENV entries, so
- # toss the ones that involve perl_root, the install location
- # for VMS
- if( $self->{_is_vms} ) {
- @inc = grep !/perl_root/i, @inc;
- }
-
- return @inc;
-}
-
-
-=head2 C<_restore_PERL5LIB>
-
- $self->_restore_PERL5LIB;
-
-This restores the original value of the C<PERL5LIB> environment variable.
-Necessary on VMS, otherwise a no-op.
-
-=cut
-
-sub _restore_PERL5LIB {
- my($self) = shift;
-
- return unless $self->{_is_vms};
-
- if (defined $self->{_old5lib}) {
- $ENV{PERL5LIB} = $self->{_old5lib};
- }
-}
-
-=head1 Parsing
-
-Methods for identifying what sort of line you're looking at.
-
-=head2 C<_is_comment>
-
- my $is_comment = $strap->_is_comment($line, \$comment);
-
-Checks if the given line is a comment. If so, it will place it into
-C<$comment> (sans #).
-
-=cut
-
-sub _is_comment {
- my($self, $line, $comment) = @_;
-
- if( $line =~ /^\s*\#(.*)/ ) {
- $$comment = $1;
- return $YES;
- }
- else {
- return $NO;
- }
-}
-
-=head2 C<_is_header>
-
- my $is_header = $strap->_is_header($line);
-
-Checks if the given line is a header (1..M) line. If so, it places
-how many tests there will be in C<< $strap->{max} >>, a list of which
-tests are todo in C<< $strap->{todo} >>, and, if the whole test was
-skipped, the reason in C<< $strap->{skip_all} >>.
-
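-Some header lines it understands (illustrative):
-
-  1..42
-  1..5 todo 2 4
-  1..0 # Skipped: no network
-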
-=cut
-
-# Regex for parsing a header. Will be run with /x
-my $Extra_Header_Re = <<'REGEX';
- ^
- (?: \s+ todo \s+ ([\d \t]+) )? # optional todo set
- (?: \s* \# \s* ([\w:]+\s?) (.*) )? # optional skip with optional reason
-REGEX
-
-sub _is_header {
- my($self, $line) = @_;
-
- if( my($max, $extra) = $line =~ /^1\.\.(\d+)(.*)/ ) {
- $self->{max} = $max;
- assert( $self->{max} >= 0, 'Max # of tests looks right' );
-
- if( defined $extra ) {
- my($todo, $skip, $reason) = $extra =~ /$Extra_Header_Re/xo;
-
- $self->{todo} = { map { $_ => 1 } split /\s+/, $todo } if $todo;
-
- if( $self->{max} == 0 ) {
- $reason = '' unless defined $skip and $skip =~ /^Skip/i;
- }
-
- $self->{skip_all} = $reason;
- }
-
- return $YES;
- }
- else {
- return $NO;
- }
-}
-
-=head2 C<_is_test>
-
- my $is_test = $strap->_is_test($line, \%test);
-
-Checks if the $line is a test report (i.e. 'ok' or 'not ok'). Reports the
-result back in C<%test> which will contain:
-
- ok did it succeed? This is the literal 'ok' or 'not ok'.
- name name of the test (if any)
- number test number (if any)
-
- type 'todo' or 'skip' (if any)
- reason why is it todo or skip? (if any)
-
-It will also catch lone 'not' lines, noting that it saw one in
-C<< $strap->{saw_lone_not} >> and the line number in
-C<< $strap->{lone_not_line} >>.
-
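-Some lines it recognizes (illustrative):
-
-  ok 1 - basic
-  not ok 2 - fails
-  ok 3 # skip no fork
-  not ok 4 # TODO not written yet
-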
-=cut
-
-my $Report_Re = <<'REGEX';
- ^
- (not\ )? # failure?
- ok\b
- (?:\s+(\d+))? # optional test number
- \s*
- (.*) # and the rest
-REGEX
-
-my $Extra_Re = <<'REGEX';
- ^
- (.*?) (?:(?:[^\\]|^)# (.*))?
- $
-REGEX
-
-sub _is_test {
- my($self, $line, $test) = @_;
-
- # We pulverize the line down into pieces in three parts.
- if( my($not, $num, $extra) = $line =~ /$Report_Re/ox ) {
- my($name, $control) = split /(?:[^\\]|^)#/, $extra if $extra;
- my($type, $reason) = $control =~ /^\s*(\S+)(?:\s+(.*))?$/ if $control;
-
- $test->{number} = $num;
- $test->{ok} = $not ? 0 : 1;
- $test->{name} = $name;
-
- if( defined $type ) {
- $test->{type} = $type =~ /^TODO$/i ? 'todo' :
- $type =~ /^Skip/i ? 'skip' : 0;
- }
- else {
- $test->{type} = '';
- }
- $test->{reason} = $reason;
-
- return $YES;
- }
-    else {
-        # Sometimes the "not " and "ok" will be on separate lines on VMS.
- # We catch this and remember we saw it.
- if( $line =~ /^not\s+$/ ) {
- $self->{saw_lone_not} = 1;
- $self->{lone_not_line} = $self->{line};
- }
-
- return $NO;
- }
-}
-
-=head2 C<_is_bail_out>
-
- my $is_bail_out = $strap->_is_bail_out($line, \$reason);
-
-Checks if the line is a "Bail out!". Places the reason for bailing
-(if any) in $reason.
-
-=cut
-
-sub _is_bail_out {
- my($self, $line, $reason) = @_;
-
- if( $line =~ /^Bail out!\s*(.*)/i ) {
- $$reason = $1 if $1;
- return $YES;
- }
- else {
- return $NO;
- }
-}
-
-=head2 C<_reset_file_state>
-
- $strap->_reset_file_state;
-
-Resets things like C<< $strap->{max} >> , C<< $strap->{skip_all} >>,
-etc. so it's ready to parse the next file.
-
-=cut
-
-sub _reset_file_state {
- my($self) = shift;
-
- delete @{$self}{qw(max skip_all todo)};
- $self->{line} = 0;
- $self->{saw_header} = 0;
- $self->{saw_bailout}= 0;
- $self->{saw_lone_not} = 0;
- $self->{lone_not_line} = 0;
- $self->{bailout_reason} = '';
- $self->{'next'} = 1;
-}
-
-=head1 Results
-
-The C<%results> returned from C<analyze()> contain the following
-information:
-
- passing true if the whole test is considered a pass
-                (or skipped), false if it's a failure
-
- exit the exit code of the test run, if from a file
- wait the wait code of the test run, if from a file
-
- max total tests which should have been run
- seen total tests actually seen
- skip_all if the whole test was skipped, this will
- contain the reason.
-
- ok number of tests which passed
- (including todo and skips)
-
- todo number of todo tests seen
- bonus number of todo tests which
- unexpectedly passed
-
- skip number of tests skipped
-
-So a successful test should have max == seen == ok.
-
-
-There is one final item, the details.
-
-    details     an array ref reporting the result of
-                each test. It looks like this:
-
- $results{details}[$test_num - 1] =
- { ok => is the test considered ok?
- actual_ok => did it literally say 'ok'?
- name => name of the test (if any)
- type => 'skip' or 'todo' (if any)
- reason => reason for the above (if any)
- };
-
-Element 0 of the details is test #1. I tried it with element 1 being
-#1 and 0 being empty, but this is less awkward.
-
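-So a sketch of a report driven by the details might look like this:
-
-  foreach my $i (0..$#{ $results{details} }) {
-      my $detail = $results{details}[$i];
-      printf "test %d: %s (%s)\n",
-             $i + 1,
-             $detail->{ok} ? 'ok' : 'NOT ok',
-             $detail->{name};
-  }
-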
-=head2 C<_detailize>
-
- my %details = $strap->_detailize($pass, \%test);
-
-Generates the details based on the last test line seen. C<$pass> is
-true if it was considered to be a passed test. C<%test> is the results
-of the test you're summarizing.
-
-=cut
-
-sub _detailize {
- my($self, $pass, $test) = @_;
-
- my %details = ( ok => $pass,
- actual_ok => $test->{ok}
- );
-
- assert( !(grep !defined $details{$_}, keys %details),
- 'test contains the ok and actual_ok info' );
-
- # We don't want these to be undef because they are often
- # checked and don't want the checker to have to deal with
- # uninitialized vars.
- foreach my $piece (qw(name type reason)) {
- $details{$piece} = defined $test->{$piece} ? $test->{$piece} : '';
- }
-
- return %details;
-}
-
-=head1 EXAMPLES
-
-See F<examples/mini_harness.plx> for an example of use.
-
-=head1 AUTHOR
-
-Michael G Schwern C<< <schwern@pobox.com> >>, currently maintained by
-Andy Lester C<< <andy@petdance.com> >>.
-
-=head1 SEE ALSO
-
-L<Test::Harness>
-
-=cut
-
-
-1;
diff --git a/Perl/t/Test/More.pm b/Perl/t/Test/More.pm
deleted file mode 100644
index 03f7552..0000000
--- a/Perl/t/Test/More.pm
+++ /dev/null
@@ -1,1248 +0,0 @@
-package Test::More;
-
-use 5.004;
-
-use strict;
-use Test::Builder;
-
-
-# Can't use Carp because it might cause use_ok() to accidentally succeed
-# even though the module being used forgot to use Carp. Yes, this
-# actually happened.
-sub _carp {
- my($file, $line) = (caller(1))[1,2];
- warn @_, " at $file line $line\n";
-}
-
-
-
-require Exporter;
-use vars qw($VERSION @ISA @EXPORT %EXPORT_TAGS $TODO);
-$VERSION = '0.47';
-@ISA = qw(Exporter);
-@EXPORT = qw(ok use_ok require_ok
- is isnt like unlike is_deeply
- cmp_ok
- skip todo todo_skip
- pass fail
- eq_array eq_hash eq_set
- $TODO
- plan
- can_ok isa_ok
- diag
- );
-
-my $Test = Test::Builder->new;
-
-
-# 5.004's Exporter doesn't have export_to_level.
-sub _export_to_level
-{
- my $pkg = shift;
- my $level = shift;
- (undef) = shift; # redundant arg
- my $callpkg = caller($level);
- $pkg->export($callpkg, @_);
-}
-
-
-=head1 NAME
-
-Test::More - yet another framework for writing test scripts
-
-=head1 SYNOPSIS
-
- use Test::More tests => $Num_Tests;
- # or
- use Test::More qw(no_plan);
- # or
- use Test::More skip_all => $reason;
-
- BEGIN { use_ok( 'Some::Module' ); }
- require_ok( 'Some::Module' );
-
- # Various ways to say "ok"
- ok($this eq $that, $test_name);
-
- is ($this, $that, $test_name);
- isnt($this, $that, $test_name);
-
- # Rather than print STDERR "# here's what went wrong\n"
- diag("here's what went wrong");
-
- like ($this, qr/that/, $test_name);
- unlike($this, qr/that/, $test_name);
-
- cmp_ok($this, '==', $that, $test_name);
-
- is_deeply($complex_structure1, $complex_structure2, $test_name);
-
- SKIP: {
- skip $why, $how_many unless $have_some_feature;
-
- ok( foo(), $test_name );
- is( foo(42), 23, $test_name );
- };
-
- TODO: {
- local $TODO = $why;
-
- ok( foo(), $test_name );
- is( foo(42), 23, $test_name );
- };
-
- can_ok($module, @methods);
- isa_ok($object, $class);
-
- pass($test_name);
- fail($test_name);
-
- # Utility comparison functions.
- eq_array(\@this, \@that);
- eq_hash(\%this, \%that);
- eq_set(\@this, \@that);
-
- # UNIMPLEMENTED!!!
- my @status = Test::More::status;
-
- # UNIMPLEMENTED!!!
- BAIL_OUT($why);
-
-
-=head1 DESCRIPTION
-
-B<STOP!> If you're just getting started writing tests, have a look at
-Test::Simple first. This is a drop-in replacement for Test::Simple
-which you can switch to once you get the hang of basic testing.
-
-The purpose of this module is to provide a wide range of testing
-utilities: various ways to say "ok" with better diagnostics,
-facilities to skip tests, to test future features, and to compare
-complicated data structures. While you can do almost anything with a
-simple C<ok()> function, it doesn't provide good diagnostic output.
-
-
-=head2 I love it when a plan comes together
-
-Before anything else, you need a testing plan. This basically declares
-how many tests your script is going to run to protect against premature
-failure.
-
-The preferred way to do this is to declare a plan when you C<use Test::More>.
-
- use Test::More tests => $Num_Tests;
-
-There are rare cases when you will not know beforehand how many tests
-your script is going to run. In this case, you can declare that you
-have no plan. (Try to avoid using this as it weakens your test.)
-
- use Test::More qw(no_plan);
-
-In some cases, you'll want to completely skip an entire testing script.
-
- use Test::More skip_all => $skip_reason;
-
-Your script will declare a skip with the reason why you skipped and
-exit immediately with a zero (success). See L<Test::Harness> for
-details.
-
-If you want to control what functions Test::More will export, you
-have to use the 'import' option. For example, to import everything
-but 'fail', you'd do:
-
- use Test::More tests => 23, import => ['!fail'];
-
-Alternatively, you can use the plan() function. Useful for when you
-have to calculate the number of tests.
-
- use Test::More;
- plan tests => keys %Stuff * 3;
-
-or for deciding whether to run the tests at all:
-
- use Test::More;
- if( $^O eq 'MacOS' ) {
- plan skip_all => 'Test irrelevant on MacOS';
- }
- else {
- plan tests => 42;
- }
-
-=cut
-
-sub plan {
- my(@plan) = @_;
-
- my $caller = caller;
-
- $Test->exported_to($caller);
-
- my @imports = ();
- foreach my $idx (0..$#plan) {
- if( $plan[$idx] eq 'import' ) {
- my($tag, $imports) = splice @plan, $idx, 2;
- @imports = @$imports;
- last;
- }
- }
-
- $Test->plan(@plan);
-
- __PACKAGE__->_export_to_level(1, __PACKAGE__, @imports);
-}
-
-sub import {
- my($class) = shift;
- goto &plan;
-}
-
-
-=head2 Test names
-
-By convention, each test is assigned a number in order. This is
-largely done automatically for you. However, it's often very useful to
-assign a name to each test. Which would you rather see:
-
- ok 4
- not ok 5
- ok 6
-
-or
-
- ok 4 - basic multi-variable
- not ok 5 - simple exponential
- ok 6 - force == mass * acceleration
-
-The latter gives you some idea of what failed. It also makes it
-easier to find the test in your script; simply search for "simple
-exponential".
-
-All test functions take a name argument. It's optional, but highly
-suggested that you use it.
-
-
-=head2 I'm ok, you're not ok.
-
-The basic purpose of this module is to print out either "ok #" or "not
-ok #" depending on whether a given test succeeded or failed. Everything
-else is just gravy.
-
-All of the following print "ok" or "not ok" depending on whether the
-test succeeded or failed. They all also return true or false,
-respectively.
-
-=over 4
-
-=item B<ok>
-
- ok($this eq $that, $test_name);
-
-This simply evaluates any expression (C<$this eq $that> is just a
-simple example) and uses that to determine if the test succeeded or
-failed. A true expression passes, a false one fails. Very simple.
-
-For example:
-
- ok( $exp{9} == 81, 'simple exponential' );
- ok( Film->can('db_Main'), 'set_db()' );
- ok( $p->tests == 4, 'saw tests' );
- ok( !grep !defined $_, @items, 'items populated' );
-
-(Mnemonic: "This is ok.")
-
-$test_name is a very short description of the test that will be printed
-out. It makes it very easy to find a test in your script when it fails
-and gives others an idea of your intentions. $test_name is optional,
-but we B<very> strongly encourage its use.
-
-Should an ok() fail, it will produce some diagnostics:
-
- not ok 18 - sufficient mucus
- # Failed test 18 (foo.t at line 42)
-
-This is actually Test::Simple's ok() routine.
-
-=cut
-
-sub ok ($;$) {
- my($test, $name) = @_;
- $Test->ok($test, $name);
-}
-
-=item B<is>
-
-=item B<isnt>
-
- is ( $this, $that, $test_name );
- isnt( $this, $that, $test_name );
-
-Similar to ok(), is() and isnt() compare their two arguments
-with C<eq> and C<ne> respectively and use the result of that to
-determine if the test succeeded or failed. So these:
-
- # Is the ultimate answer 42?
- is( ultimate_answer(), 42, "Meaning of Life" );
-
- # $foo isn't empty
- isnt( $foo, '', "Got some foo" );
-
-are similar to these:
-
- ok( ultimate_answer() eq 42, "Meaning of Life" );
- ok( $foo ne '', "Got some foo" );
-
-(Mnemonic: "This is that." "This isn't that.")
-
-So why use these? They produce better diagnostics on failure. ok()
-cannot know what you are testing for (beyond the name), but is() and
-isnt() know what the test was and why it failed. For example this
-test:
-
- my $foo = 'waffle'; my $bar = 'yarblokos';
- is( $foo, $bar, 'Is foo the same as bar?' );
-
-Will produce something like this:
-
- not ok 17 - Is foo the same as bar?
- # Failed test (foo.t at line 139)
- # got: 'waffle'
- # expected: 'yarblokos'
-
-So you can figure out what went wrong without rerunning the test.
-
-You are encouraged to use is() and isnt() over ok() where possible,
-however do not be tempted to use them to find out if something is
-true or false!
-
- # XXX BAD! $pope->isa('Catholic') eq 1
- is( $pope->isa('Catholic'), 1, 'Is the Pope Catholic?' );
-
-This does not check if C<< $pope->isa('Catholic') >> is true, it
-checks if it returns 1. Very different. Similar caveats exist for
-false and 0. In these cases, use ok().
-
-  ok( $pope->isa('Catholic'), 'Is the Pope Catholic?' );
-
-For those grammatical pedants out there, there's an C<isn't()>
-function which is an alias of isnt().
-
-=cut
-
-sub is ($$;$) {
- $Test->is_eq(@_);
-}
-
-sub isnt ($$;$) {
- $Test->isnt_eq(@_);
-}
-
-*isn't = \&isnt;
-
-
-=item B<like>
-
- like( $this, qr/that/, $test_name );
-
-Similar to ok(), like() matches $this against the regex C<qr/that/>.
-
-So this:
-
- like($this, qr/that/, 'this is like that');
-
-is similar to:
-
- ok( $this =~ /that/, 'this is like that');
-
-(Mnemonic "This is like that".)
-
-The second argument is a regular expression. It may be given as a
-regex reference (i.e. C<qr//>) or (for better compatibility with older
-perls) as a string that looks like a regex (alternative delimiters are
-currently not supported):
-
- like( $this, '/that/', 'this is like that' );
-
-Regex options may be placed on the end (C<'/that/i'>).
-
-Its advantages over ok() are similar to those of is() and isnt():
-better diagnostics on failure.
-
-=cut
-
-sub like ($$;$) {
- $Test->like(@_);
-}
-
-
-=item B<unlike>
-
- unlike( $this, qr/that/, $test_name );
-
-Works exactly as like(), only it checks if $this B<does not> match the
-given pattern.
-
-=cut
-
-sub unlike {
- $Test->unlike(@_);
-}
-
-
-=item B<cmp_ok>
-
- cmp_ok( $this, $op, $that, $test_name );
-
-Halfway between ok() and is() lies cmp_ok(). This allows you to
-compare two arguments using any binary perl operator.
-
- # ok( $this eq $that );
- cmp_ok( $this, 'eq', $that, 'this eq that' );
-
- # ok( $this == $that );
- cmp_ok( $this, '==', $that, 'this == that' );
-
- # ok( $this && $that );
-  cmp_ok( $this, '&&', $that, 'this && that' );
- ...etc...
-
-Its advantage over ok() is that when the test fails you'll know what
-$this and $that were:
-
- not ok 1
- # Failed test (foo.t at line 12)
- # '23'
- # &&
- # undef
-
-It's also useful in those cases where you are comparing numbers and
-is()'s use of C<eq> will interfere:
-
- cmp_ok( $big_hairy_number, '==', $another_big_hairy_number );
-
-=cut
-
-sub cmp_ok($$$;$) {
- $Test->cmp_ok(@_);
-}
-
-
-=item B<can_ok>
-
- can_ok($module, @methods);
- can_ok($object, @methods);
-
-Checks to make sure the $module or $object can do these @methods
-(works with functions, too).
-
- can_ok('Foo', qw(this that whatever));
-
-is almost exactly like saying:
-
- ok( Foo->can('this') &&
- Foo->can('that') &&
- Foo->can('whatever')
- );
-
-only without all the typing and with a better interface. Handy for
-quickly testing an interface.
-
-No matter how many @methods you check, a single can_ok() call counts
-as one test. If you desire otherwise, use:
-
- foreach my $meth (@methods) {
- can_ok('Foo', $meth);
- }
-
-=cut
-
-sub can_ok ($@) {
- my($proto, @methods) = @_;
- my $class = ref $proto || $proto;
-
- unless( @methods ) {
- my $ok = $Test->ok( 0, "$class->can(...)" );
- $Test->diag(' can_ok() called with no methods');
- return $ok;
- }
-
- my @nok = ();
- foreach my $method (@methods) {
- local($!, $@); # don't interfere with caller's $@
- # eval sometimes resets $!
- eval { $proto->can($method) } || push @nok, $method;
- }
-
- my $name;
- $name = @methods == 1 ? "$class->can('$methods[0]')"
- : "$class->can(...)";
-
- my $ok = $Test->ok( !@nok, $name );
-
- $Test->diag(map " $class->can('$_') failed\n", @nok);
-
- return $ok;
-}
-
-=item B<isa_ok>
-
- isa_ok($object, $class, $object_name);
- isa_ok($ref, $type, $ref_name);
-
-Checks to see if the given $object->isa($class). Also checks to make
-sure the object was defined in the first place. Handy for this sort
-of thing:
-
- my $obj = Some::Module->new;
- isa_ok( $obj, 'Some::Module' );
-
-where you'd otherwise have to write
-
- my $obj = Some::Module->new;
- ok( defined $obj && $obj->isa('Some::Module') );
-
-to safeguard against your test script blowing up.
-
-It works on references, too:
-
- isa_ok( $array_ref, 'ARRAY' );
-
-The diagnostics of this test normally just refer to 'the object'. If
-you'd like them to be more specific, you can supply an $object_name
-(for example 'Test customer').
-
-=cut
-
-sub isa_ok ($$;$) {
- my($object, $class, $obj_name) = @_;
-
- my $diag;
- $obj_name = 'The object' unless defined $obj_name;
- my $name = "$obj_name isa $class";
- if( !defined $object ) {
- $diag = "$obj_name isn't defined";
- }
- elsif( !ref $object ) {
- $diag = "$obj_name isn't a reference";
- }
- else {
- # We can't use UNIVERSAL::isa because we want to honor isa() overrides
- local($@, $!); # eval sometimes resets $!
- my $rslt = eval { $object->isa($class) };
- if( $@ ) {
- if( $@ =~ /^Can't call method "isa" on unblessed reference/ ) {
- if( !UNIVERSAL::isa($object, $class) ) {
- my $ref = ref $object;
- $diag = "$obj_name isn't a '$class' it's a '$ref'";
- }
- } else {
- die <<WHOA;
-WHOA! I tried to call ->isa on your object and got some weird error.
-This should never happen. Please contact the author immediately.
-Here's the error.
-$@
-WHOA
- }
- }
- elsif( !$rslt ) {
- my $ref = ref $object;
- $diag = "$obj_name isn't a '$class' it's a '$ref'";
- }
- }
-
-
-
- my $ok;
- if( $diag ) {
- $ok = $Test->ok( 0, $name );
- $Test->diag(" $diag\n");
- }
- else {
- $ok = $Test->ok( 1, $name );
- }
-
- return $ok;
-}
-
-
-=item B<pass>
-
-=item B<fail>
-
- pass($test_name);
- fail($test_name);
-
-Sometimes you just want to say that the tests have passed. Usually
-this is because you've got some complicated condition that is
-difficult to wedge into an ok(). In this case, you can simply use
-pass() (to declare the test ok) or fail() (for not ok). They are
-synonyms for ok(1) and ok(0).
-
-Use these very, very, very sparingly.
-
-=cut
-
-sub pass (;$) {
- $Test->ok(1, @_);
-}
-
-sub fail (;$) {
- $Test->ok(0, @_);
-}
-
-=back
-
-=head2 Diagnostics
-
-If you pick the right test function, you'll usually get a good idea of
-what went wrong when it failed. But sometimes it doesn't work out
-that way. So here we have ways for you to write your own diagnostic
-messages which are safer than just C<print STDERR>.
-
-=over 4
-
-=item B<diag>
-
- diag(@diagnostic_message);
-
-Prints a diagnostic message which is guaranteed not to interfere with
-test output. Handy for this sort of thing:
-
- ok( grep(/foo/, @users), "There's a foo user" ) or
- diag("Since there's no foo, check that /etc/bar is set up right");
-
-which would produce:
-
- not ok 42 - There's a foo user
- # Failed test (foo.t at line 52)
- # Since there's no foo, check that /etc/bar is set up right.
-
-You might remember C<ok() or diag()> with the mnemonic C<open() or
-die()>.
-
-B<NOTE> The exact formatting of the diagnostic output is still
-changing, but it is guaranteed that whatever you throw at it won't
-interfere with the test.
-
-=cut
-
-sub diag {
- $Test->diag(@_);
-}
-
-
-=back
-
-=head2 Module tests
-
-You usually want to test if the module you're testing loads ok, rather
-than just vomiting if its load fails. For such purposes we have
-C<use_ok> and C<require_ok>.
-
-=over 4
-
-=item B<use_ok>
-
- BEGIN { use_ok($module); }
- BEGIN { use_ok($module, @imports); }
-
-These simply use the given $module and test to make sure the load
-happened ok. It's recommended that you run use_ok() inside a BEGIN
-block so its functions are exported at compile-time and prototypes are
-properly honored.
-
-If @imports are given, they are passed through to the use. So this:
-
- BEGIN { use_ok('Some::Module', qw(foo bar)) }
-
-is like doing this:
-
- use Some::Module qw(foo bar);
-
-Don't try to do this:
-
- BEGIN {
- use_ok('Some::Module');
-
- ...some code that depends on the use...
- ...happening at compile time...
- }
-
-Instead, you want:
-
- BEGIN { use_ok('Some::Module') }
- BEGIN { ...some code that depends on the use... }
-
-
-=cut
-
-sub use_ok ($;@) {
- my($module, @imports) = @_;
- @imports = () unless @imports;
-
- my $pack = caller;
-
- local($@,$!); # eval sometimes interferes with $!
- eval <<USE;
-package $pack;
-require $module;
-'$module'->import(\@imports);
-USE
-
- my $ok = $Test->ok( !$@, "use $module;" );
-
- unless( $ok ) {
- chomp $@;
- $Test->diag(<<DIAGNOSTIC);
- Tried to use '$module'.
- Error: $@
-DIAGNOSTIC
-
- }
-
- return $ok;
-}
-
-=item B<require_ok>
-
- require_ok($module);
-
-Like use_ok(), except it requires the $module.
-
-=cut
-
-sub require_ok ($) {
- my($module) = shift;
-
- my $pack = caller;
-
- local($!, $@); # eval sometimes interferes with $!
- eval <<REQUIRE;
-package $pack;
-require $module;
-REQUIRE
-
- my $ok = $Test->ok( !$@, "require $module;" );
-
- unless( $ok ) {
- chomp $@;
- $Test->diag(<<DIAGNOSTIC);
- Tried to require '$module'.
- Error: $@
-DIAGNOSTIC
-
- }
-
- return $ok;
-}
-
-=back
-
-=head2 Conditional tests
-
-Sometimes running a test under certain conditions will cause the
-test script to die. A certain function or method isn't implemented
-(such as fork() on MacOS), some resource isn't available (like a
-net connection) or a module isn't available. In these cases it's
-necessary to skip tests, or declare that they are supposed to fail
-but will work in the future (a todo test).
-
-For more details on the mechanics of skip and todo tests see
-L<Test::Harness>.
-
-The way Test::More handles this is with a named block. Basically, a
-block of tests which can be skipped over or made todo. It's best if I
-just show you...
-
-=over 4
-
-=item B<SKIP: BLOCK>
-
- SKIP: {
- skip $why, $how_many if $condition;
-
- ...normal testing code goes here...
- }
-
-This declares a block of tests that might be skipped: how many tests
-there are ($how_many), why ($why), and under what $condition to skip
-them. An example is the easiest way to illustrate:
-
- SKIP: {
- eval { require HTML::Lint };
-
- skip "HTML::Lint not installed", 2 if $@;
-
- my $lint = new HTML::Lint;
- isa_ok( $lint, "HTML::Lint" );
-
- $lint->parse( $html );
- is( $lint->errors, 0, "No errors found in HTML" );
- }
-
-If the user does not have HTML::Lint installed, the whole block of
-code I<won't be run at all>. Test::More will output special ok's
-which Test::Harness interprets as skipped, but passing, tests.
-It's important that $how_many accurately reflects the number of tests
-in the SKIP block so the # of tests run will match up with your plan.
-
-It's perfectly safe to nest SKIP blocks. Each SKIP block must have
-the label C<SKIP>, or Test::More can't work its magic.
-
-You don't skip tests which are failing because there's a bug in your
-program, or for which you don't yet have code written. For that you
-use TODO. Read on.
-
-=cut
-
-#'#
-sub skip {
- my($why, $how_many) = @_;
-
- unless( defined $how_many ) {
- # $how_many can only be avoided when no_plan is in use.
- _carp "skip() needs to know \$how_many tests are in the block"
- unless $Test::Builder::No_Plan;
- $how_many = 1;
- }
-
- for( 1..$how_many ) {
- $Test->skip($why);
- }
-
- local $^W = 0;
- last SKIP;
-}
-
-
-=item B<TODO: BLOCK>
-
- TODO: {
- local $TODO = $why if $condition;
-
- ...normal testing code goes here...
- }
-
-Declares a block of tests you expect to fail and $why. Perhaps it's
-because you haven't fixed a bug or haven't finished a new feature:
-
- TODO: {
- local $TODO = "URI::Geller not finished";
-
- my $card = "Eight of clubs";
- is( URI::Geller->your_card, $card, 'Is THIS your card?' );
-
- my $spoon;
- URI::Geller->bend_spoon;
- is( $spoon, 'bent', "Spoon bending, that's original" );
- }
-
-With a todo block, the tests inside are expected to fail. Test::More
-will run the tests normally, but print out special flags indicating
-they are "todo". Test::Harness will interpret failures as being ok.
-Should anything succeed, it will report it as an unexpected success.
-You then know the thing you had todo is done and can remove the
-TODO flag.
-
-The nice part about todo tests, as opposed to simply commenting out a
-block of tests, is it's like having a programmatic todo list. You know
-how much work is left to be done, you're aware of what bugs there are,
-and you'll know immediately when they're fixed.
-
-Once a todo test starts succeeding, simply move it outside the block.
-When the block is empty, delete it.
-
-
-=item B<todo_skip>
-
- TODO: {
- todo_skip $why, $how_many if $condition;
-
- ...normal testing code...
- }
-
-With todo tests, it's best to have the tests actually run. That way
-you'll know when they start passing. Sometimes this isn't possible.
-Often a failing test will cause the whole program to die or hang, even
-inside an C<eval BLOCK>, even when using C<alarm>. In these extreme
-cases you have no choice but to skip over the broken tests entirely.
-
-The syntax and behavior is similar to a C<SKIP: BLOCK> except the
-tests will be marked as failing but todo. Test::Harness will
-interpret them as passing.
-
-=cut
-
-sub todo_skip {
- my($why, $how_many) = @_;
-
- unless( defined $how_many ) {
- # $how_many can only be avoided when no_plan is in use.
- _carp "todo_skip() needs to know \$how_many tests are in the block"
- unless $Test::Builder::No_Plan;
- $how_many = 1;
- }
-
- for( 1..$how_many ) {
- $Test->todo_skip($why);
- }
-
- local $^W = 0;
- last TODO;
-}
-
-=item When do I use SKIP vs. TODO?
-
-B<If it's something the user might not be able to do>, use SKIP.
-This includes optional modules that aren't installed, running under
-an OS that doesn't have some feature (like fork() or symlinks), or maybe
-you need an Internet connection and one isn't available.
-
-B<If it's something the programmer hasn't done yet>, use TODO. This
-is for any code you haven't written yet, or bugs you have yet to fix,
-but want to put tests in your testing script (always a good idea).
-
-
-=back
-
-=head2 Comparison functions
-
-Not everything is a simple eq check or regex. There are times you
-need to see if two arrays are equivalent, for instance. For these
-instances, Test::More provides a handful of useful functions.
-
-B<NOTE> These are NOT well-tested on circular references. Nor am I
-quite sure what will happen with filehandles.
-
-=over 4
-
-=item B<is_deeply>
-
- is_deeply( $this, $that, $test_name );
-
-Similar to is(), except that if $this and $that are hash or array
-references, it does a deep comparison walking each data structure to
-see if they are equivalent. If the two structures are different, it
-will display the place where they start differing.
-
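-For example (the structures are illustrative):
-
-  my $got      = { name => 'fnord', tags => [ 'a', 'b' ] };
-  my $expected = { name => 'fnord', tags => [ 'a', 'c' ] };
-
-  # Fails, reporting roughly:
-  #     $got->{tags}[1] = 'b'
-  #     $expected->{tags}[1] = 'c'
-  is_deeply( $got, $expected, 'structures match' );
-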
-Barrie Slaymaker's Test::Differences module provides more in-depth
-functionality along these lines, and it plays well with Test::More.
-
-B<NOTE> Display of scalar refs is not quite 100%
-
-=cut
-
-use vars qw(@Data_Stack);
-my $DNE = bless [], 'Does::Not::Exist';
-sub is_deeply {
- my($this, $that, $name) = @_;
-
- my $ok;
- if( !ref $this || !ref $that ) {
- $ok = $Test->is_eq($this, $that, $name);
- }
- else {
- local @Data_Stack = ();
- if( _deep_check($this, $that) ) {
- $ok = $Test->ok(1, $name);
- }
- else {
- $ok = $Test->ok(0, $name);
-            $Test->diag(_format_stack(@Data_Stack));
- }
- }
-
- return $ok;
-}
-
-sub _format_stack {
- my(@Stack) = @_;
-
- my $var = '$FOO';
- my $did_arrow = 0;
- foreach my $entry (@Stack) {
- my $type = $entry->{type} || '';
- my $idx = $entry->{'idx'};
- if( $type eq 'HASH' ) {
- $var .= "->" unless $did_arrow++;
- $var .= "{$idx}";
- }
- elsif( $type eq 'ARRAY' ) {
- $var .= "->" unless $did_arrow++;
- $var .= "[$idx]";
- }
- elsif( $type eq 'REF' ) {
- $var = "\${$var}";
- }
- }
-
- my @vals = @{$Stack[-1]{vals}}[0,1];
- my @vars = ();
- ($vars[0] = $var) =~ s/\$FOO/ \$got/;
- ($vars[1] = $var) =~ s/\$FOO/\$expected/;
-
- my $out = "Structures begin differing at:\n";
- foreach my $idx (0..$#vals) {
- my $val = $vals[$idx];
- $vals[$idx] = !defined $val ? 'undef' :
- $val eq $DNE ? "Does not exist"
- : "'$val'";
- }
-
- $out .= "$vars[0] = $vals[0]\n";
- $out .= "$vars[1] = $vals[1]\n";
-
- $out =~ s/^/ /msg;
- return $out;
-}
-
-
-=item B<eq_array>
-
- eq_array(\@this, \@that);
-
-Checks if two arrays are equivalent. This is a deep check, so
-multi-level structures are handled correctly.
-
-=cut
-
-#'#
-sub eq_array {
- my($a1, $a2) = @_;
- return 1 if $a1 eq $a2;
-
- my $ok = 1;
- my $max = $#$a1 > $#$a2 ? $#$a1 : $#$a2;
- for (0..$max) {
- my $e1 = $_ > $#$a1 ? $DNE : $a1->[$_];
- my $e2 = $_ > $#$a2 ? $DNE : $a2->[$_];
-
- push @Data_Stack, { type => 'ARRAY', idx => $_, vals => [$e1, $e2] };
- $ok = _deep_check($e1,$e2);
- pop @Data_Stack if $ok;
-
- last unless $ok;
- }
- return $ok;
-}
-
-sub _deep_check {
- my($e1, $e2) = @_;
- my $ok = 0;
-
- {
- # Quiet uninitialized value warnings when comparing undefs.
- local $^W = 0;
-
- if( $e1 eq $e2 ) {
- $ok = 1;
- }
- else {
- if( UNIVERSAL::isa($e1, 'ARRAY') and
- UNIVERSAL::isa($e2, 'ARRAY') )
- {
- $ok = eq_array($e1, $e2);
- }
- elsif( UNIVERSAL::isa($e1, 'HASH') and
- UNIVERSAL::isa($e2, 'HASH') )
- {
- $ok = eq_hash($e1, $e2);
- }
- elsif( UNIVERSAL::isa($e1, 'REF') and
- UNIVERSAL::isa($e2, 'REF') )
- {
- push @Data_Stack, { type => 'REF', vals => [$e1, $e2] };
- $ok = _deep_check($$e1, $$e2);
- pop @Data_Stack if $ok;
- }
- elsif( UNIVERSAL::isa($e1, 'SCALAR') and
- UNIVERSAL::isa($e2, 'SCALAR') )
- {
- push @Data_Stack, { type => 'REF', vals => [$e1, $e2] };
- $ok = _deep_check($$e1, $$e2);
- }
- else {
- push @Data_Stack, { vals => [$e1, $e2] };
- $ok = 0;
- }
- }
- }
-
- return $ok;
-}
-
-
-=item B<eq_hash>
-
- eq_hash(\%this, \%that);
-
-Determines if the two hashes contain the same keys and values. This
-is a deep check.
-
-=cut
-
-sub eq_hash {
- my($a1, $a2) = @_;
- return 1 if $a1 eq $a2;
-
- my $ok = 1;
- my $bigger = keys %$a1 > keys %$a2 ? $a1 : $a2;
- foreach my $k (keys %$bigger) {
- my $e1 = exists $a1->{$k} ? $a1->{$k} : $DNE;
- my $e2 = exists $a2->{$k} ? $a2->{$k} : $DNE;
-
- push @Data_Stack, { type => 'HASH', idx => $k, vals => [$e1, $e2] };
- $ok = _deep_check($e1, $e2);
- pop @Data_Stack if $ok;
-
- last unless $ok;
- }
-
- return $ok;
-}
-
-=item B<eq_set>
-
- eq_set(\@this, \@that);
-
-Similar to eq_array(), except the order of the elements is B<not>
-important. This is a deep check, but the irrelevancy of order only
-applies to the top level.
-
-B<NOTE> By historical accident, this is not a true set comparison.
-While the order of elements does not matter, duplicate elements do.
-
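-For instance (illustrative):
-
-  eq_set( [1, 2, 2], [2, 2, 1] );   # true:  same elements, same counts
-  eq_set( [1, 2, 2], [2, 1, 1] );   # false: counts differ
-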
-=cut
-
-# We must make sure that references are treated neutrally. It really
-# doesn't matter how we sort them, as long as both arrays are sorted
-# with the same algorithm.
-sub _bogus_sort { local $^W = 0; ref $a ? 0 : $a cmp $b }
-
-sub eq_set {
- my($a1, $a2) = @_;
- return 0 unless @$a1 == @$a2;
-
- # There's faster ways to do this, but this is easiest.
- return eq_array( [sort _bogus_sort @$a1], [sort _bogus_sort @$a2] );
-}
-
-=back
-
-
-=head2 Extending and Embedding Test::More
-
-Sometimes the Test::More interface isn't quite enough. Fortunately,
-Test::More is built on top of Test::Builder which provides a single,
-unified backend for any test library to use. This means two test
-libraries which both use Test::Builder B<can be used together in the
-same program>.
-
-If you simply want to do a little tweaking of how the tests behave,
-you can access the underlying Test::Builder object like so:
-
-=over 4
-
-=item B<builder>
-
- my $test_builder = Test::More->builder;
-
-Returns the Test::Builder object underlying Test::More for you to play
-with.
-
-=cut
-
-sub builder {
- return Test::Builder->new;
-}
-
-=back
-
-
-=head1 NOTES
-
-Test::More is B<explicitly> tested all the way back to perl 5.004.
-
-Test::More is thread-safe for perl 5.8.0 and up.
-
-=head1 BUGS and CAVEATS
-
-=over 4
-
-=item Making your own ok()
-
-If you are trying to extend Test::More, don't. Use Test::Builder
-instead.
-
-=item The eq_* family has some caveats.
-
-=item Test::Harness upgrades
-
-no_plan and todo depend on new Test::Harness features and fixes. If
-you're going to distribute tests that use no_plan or todo, your
-end-users will have to upgrade Test::Harness to the latest one on
-CPAN. If you avoid no_plan and TODO tests, the stock Test::Harness
-will work fine.
-
-If you simply depend on Test::More, its own dependencies will cause a
-Test::Harness upgrade.
-
-=back
-
-
-=head1 HISTORY
-
-This is a case of convergent evolution with Joshua Pritikin's Test
-module. I was largely unaware of its existence when I'd first
-written my own ok() routines. This module exists because I can't
-figure out how to easily wedge test names into Test's interface (along
-with a few other problems).
-
-The goal here is to have a testing utility that's simple to learn,
-quick to use and difficult to trip yourself up with while still
-providing more flexibility than the existing Test.pm. As such, the
-names of the most common routines are kept tiny, and special cases and
-magic side-effects are kept to a minimum. WYSIWYG.
-
-
-=head1 SEE ALSO
-
-L<Test::Simple> if all this confuses you and you just want to write
-some tests. You can upgrade to Test::More later (it's forward
-compatible).
-
-L<Test::Differences> for more ways to test complex data structures.
-And it plays well with Test::More.
-
-L<Test> is the old testing module. Its main benefit is that it has
-been distributed with Perl since 5.004_05.
-
-L<Test::Harness> for details on how your test results are interpreted
-by Perl.
-
-L<Test::Unit> describes a very featureful unit testing interface.
-
-L<Test::Inline> shows the idea of embedded testing.
-
-L<SelfTest> is another approach to embedded testing.
-
-
-=head1 AUTHORS
-
-Michael G Schwern E<lt>schwern@pobox.comE<gt> with much inspiration
-from Joshua Pritikin's Test module and lots of help from Barrie
-Slaymaker, Tony Bowden, chromatic and the perl-qa gang.
-
-
-=head1 COPYRIGHT
-
-Copyright 2001 by Michael G Schwern E<lt>schwern@pobox.comE<gt>.
-
-This program is free software; you can redistribute it and/or
-modify it under the same terms as Perl itself.
-
-See F<http://www.perl.com/perl/misc/Artistic.html>
-
-=cut
-
-1;