Test-Inter-1.09/0000755000175000001440000000000013442510561013011 5ustar sulbeckusersTest-Inter-1.09/lib/0000755000175000001440000000000012404053065013555 5ustar sulbeckusersTest-Inter-1.09/lib/Test/0000755000175000001440000000000013442446505014504 5ustar sulbeckusersTest-Inter-1.09/lib/Test/Inter.pm0000644000175000001440000012066713442251345016133 0ustar sulbeckuserspackage Test::Inter; # Copyright (c) 2010-2019 Sullivan Beck. All rights reserved. # This program is free software; you can redistribute it and/or modify it # under the same terms as Perl itself. ############################################################################### require 5.004; use warnings; use strict; use File::Basename; use IO::File; use Cwd 'abs_path'; our($VERSION); $VERSION = '1.09'; ############################################################################### # BASE METHODS ############################################################################### sub version { my($self) = @_; return $VERSION; } sub new { my($class,@args) = @_; my($name,%opts); if (@args % 2) { ($name,%opts) = @args; } else { $name = $0; $name =~ s,^\./,,; %opts = @args; } # The basic structure my $self = { 'name' => $name, # the name of the test script 'start' => 1, # the first test to run 'end' => 0, # the last test to end 'plan' => 0, # the number of tests planned 'abort' => 0, # abort on the first failed test 'quiet' => 0, # if 1, no output on successes # (this should only be done when # running as an interactive script) 'mode' => 'test', # mode to run script in 'width' => 80, # width of terminal 'features' => {}, # a list of available features 'use_lib' => 'on', # whether to run 'use lib' when loading # this module 'skipall' => '', # the reason for skipping all # remaining tests 'plandone' => 0, # 1 if a plan is done 'testsrun' => 0, # 1 if any tests have been run 'libdir' => '', # a directory to load modules from 'testdir' => '', # the test directory }; bless $self, $class; $main::TI_NUM = 0; # Handle 
options, environment variables, global variables my @opts = qw(start end testnum plan abort quiet mode width skip_all); my %o = map { $_,1 } @opts; no strict 'refs'; foreach my $opt (@opts) { if (! exists $o{$opt}) { $self->_die("Invalid option to new method: $opt"); } my $OPT = uc("ti_$opt"); if (exists $opts{opt} || exists $ENV{$OPT} || defined ${ "main::$OPT" }) { my $val; if (defined ${ "main::$OPT" }) { $val = ${ "main::$OPT" }; } elsif (exists $ENV{$OPT}) { $val = $ENV{$OPT}; } else { $val = $opts{$opt}; } &{ "Test::Inter::$opt" }($self,$val); } } if ($$self{'mode'} ne 'test') { print "\nRunning $name...\n"; } # We assume that the module is distributed in a directory with the correct # hierarchy. This is: # /some/path MODDIR # /t TESTDIR # /lib LIBDIR # We'll find the full path to each. my($moddir,$testdir,$libdir); if (-f "$0") { $moddir = dirname(dirname(abs_path($0))); } elsif (-d "./t") { $moddir = dirname(abs_path('.')); } elsif (-d "../t") { $moddir = dirname(abs_path('..')); } if (-d "$moddir/t") { $testdir = "$moddir/t"; } if (-d "$moddir/lib") { $libdir = "$moddir/lib"; } $$self{'moddir'} = $moddir; $$self{'testdir'} = $testdir; $$self{'libdir'} = $libdir; $self->use_lib(); return $self; } sub use_lib { my($self,$val) = @_; if (defined $val) { $$self{'use_lib'} = $val; return; } if ($$self{'use_lib'} eq 'on') { foreach my $dir ($$self{'libdir'},$$self{'testdir'}) { next if (! defined $dir); eval "use lib '$dir'"; } } } sub testdir { my($self,$req) = @_; if ($req && $req eq 'mod') { return $$self{'moddir'}; } elsif ($req && $req eq 'lib') { return $$self{'libdir'}; } return $$self{'testdir'}; } sub start { my($self,$val) = @_; $val = 1 if (! defined($val)); $self->_die("start requires an integer value") if ($val !~ /^\d+$/); $$self{'start'} = $val; } sub end { my($self,$val) = @_; $val = 0 if (! $val); $self->_die("end requires an integer value") if ($val !~ /^\d+$/); $$self{'end'} = $val; } sub testnum { my($self,$val) = @_; if (! 
defined($val)) { $$self{'start'} = 1; $$self{'end'} = 0; } else { $self->_die("testnum requires an integer value") if ($val !~ /^\d+$/); $$self{'start'} = $$self{'end'} = $val; } } sub plan { my($self,$val) = @_; if ($$self{'plandone'}) { $self->_die('Plan/done_testing included twice'); } $$self{'plandone'} = 1; $val = 0 if (! defined($val)); $self->_die("plan requires an integer value") if ($val !~ /^\d+$/); $$self{'plan'} = $val; if ($val != 0) { $self->_plan($val); } } sub done_testing { my($self,$val) = @_; if ($$self{'plandone'}) { $self->_die('Plan/done_testing included twice'); } $$self{'plandone'} = 1; $val = $main::TI_NUM if (! $val); $self->_die("done_testing requires an integer value") if ($val !~ /^\d+$/); $self->_plan($val); if ($val != $main::TI_NUM) { $self->_die("Ran $main::TI_NUM tests, expected $val"); } } sub abort { my($self,$val) = @_; $val = 0 if (! $val); $$self{'abort'} = $val; } sub quiet { my($self,$val) = @_; $val = 0 if (! $val); $$self{'quiet'} = $val; } sub mode { my($self,$val) = @_; $val = 'test' if (! $val); $$self{'mode'} = $val; } sub width { my($self,$val) = @_; $val = 0 if (! $val); $$self{'width'} = $val; } sub skip_all { my($self,$reason,@features) = @_; if (@features) { my $skip = 0; foreach my $feature (@features) { if (! exists $$self{'features'}{$feature} || ! $$self{'features'}{$feature}) { $skip = 1; $reason = "Required feature ($feature) missing" if (! $reason); last; } } return if (! $skip); } if ($$self{'plandone'} || $$self{'testsrun'}) { $reason = 'Remaining tests skipped' if (! $reason); $$self{'skipall'} = $reason; } else { $reason = 'Test script skipped' if (! 
$reason); $self->_plan(0,$reason); exit 0; } } sub _die { my($self,$message) = @_; print "ERROR: $message\n"; exit 1; } sub feature { my($self,$feature,$val) = @_; $$self{'features'}{$feature} = $val; } sub diag { my($self,$message) = @_; return if ($$self{'quiet'} == 2); $self->_diag($message); } sub note { my($self,$message) = @_; return if ($$self{'quiet'}); $self->_diag($message); } ############################################################################### # LOAD METHODS ############################################################################### # The routines were originally from Test::More (though they have been # changed... some to a greater extent than others). sub require_ok { my($self,$module,$mode) = @_; $mode = '' if (! $mode); $main::TI_NUM++ unless ($mode eq 'feature'); my $pack = caller; my @inc = map { "unshift(\@INC,'$_');\n" } ($$self{'libdir'},$$self{'testdir'}); my($desc,$code); if ( $module =~ /^\d+(?:\.\d+)?$/ ) { # A perl version check. $desc = "require perl $module"; $code = <_is_module_name($module); $desc = "require $module"; my $p = "package"; # So the following do not get picked up by cpantorpm-depreq my $r = "require"; $code = <_eval($code); chomp($eval_error); my @eval_error = split(/\n/,$eval_error); foreach my $err (@eval_error) { $err =~ s/ \(\@INC contains.*//; # strip out the actual @INC values } my $ok = 1; if ($eval_result) { # Able to load the module if ($mode eq 'forbid') { $$self{'skipall'} = 'Loaded a module not supposed to be present'; $self->_not_ok($desc); $self->_diag('Test required that module not be loadable') unless ($$self{'quiet'} == 2); $ok = 0; } elsif ($mode eq 'feature') { $self->feature($module,1); if (! $$self{'quiet'}) { $self->_diag($desc); $self->_diag("Feature available: $module"); } } else { $self->_ok($desc); } } else { # Unable to load the module if ($mode eq 'forbid') { $self->_ok($desc); } elsif ($mode eq 'feature') { $self->feature($module,0); if (! 
$$self{'quiet'}) { $self->_diag($desc); $self->_diag("Feature unavailable: $module"); } } else { $$self{'skipall'} = 'Unable to load a required module'; $self->_not_ok($desc); $ok = 0; } } return if ( ($ok && $$self{'quiet'}) || (! $ok && $$self{'quiet'} == 2) ); foreach my $err (@eval_error) { $self->_diag($err); } } sub use_ok { my($self,@args) = @_; my $mode = ''; if ($args[$#args] eq 'forbid' || $args[$#args] eq 'feature') { $mode = pop(@args); } $main::TI_NUM++ unless ($mode eq 'feature'); my $pack = caller; my($code,$desc,$module); if ( @args == 1 and $args[0] =~ /^\d+(?:\.\d+)?$/ ) { # A perl version check. $desc = "require perl $args[0]"; $module = 'perl'; $code = <_is_module_name($module)) { $self->_not_ok("use module: invalid module name: $module"); return; } my $vers = ''; if ( @args and $args[0] =~ /^\d+(?:\.\d+)?$/ ) { $vers = shift(@args); } my $imports = (@args ? 'qw(' . join(' ',@args) . ')' : ''); $desc = "use $module $vers $imports"; my @inc = map { "unshift(\@INC,'$_');\n" } ($$self{'libdir'},$$self{'testdir'}); my $p = "package"; # So the following do not get picked up by cpantorpm-depreq $code = <_not_ok('use module: no module specified'); return; } $desc .= ' (should not load)' if ($mode eq 'forbid'); $desc .= ' (feature)' if ($mode eq 'feature'); my($eval_result,$eval_error) = $self->_eval($code); chomp($eval_error); my @eval_error = split(/\n/,$eval_error); @eval_error = grep(!/^BEGIN failed--compilation aborted/,@eval_error); foreach my $err (@eval_error) { $err =~ s/ \(\@INC contains.*//; # strip out the actual @INC values } my $ok = 1; if ($eval_result) { # Able to load the module if ($mode eq 'forbid') { $$self{'skipall'} = 'Loaded a module not supposed to be present'; $self->_not_ok($desc); $self->_diag('Test required that module not be usable') unless ($$self{'quiet'} == 2); $ok = 0; } elsif ($mode eq 'feature') { $self->feature($module,1); if (! 
$$self{'quiet'}) { $self->_diag($desc); $self->_diag("Feature available: $module"); } } else { $self->_ok($desc); } } else { # Unable to load the module if ($mode eq 'forbid') { $self->_ok($desc); } elsif ($mode eq 'feature') { $self->feature($module,0); if (! $$self{'quiet'}) { $self->_diag($desc); $self->_diag("Feature unavailable: $module"); } } else { $$self{'skipall'} = 'Unable to load a required module'; $self->_not_ok($desc); $ok = 0; } } return if ( ($ok && $$self{'quiet'}) || (! $ok && $$self{'quiet'} == 2) ); foreach my $err (@eval_error) { $self->_diag($err); } } sub _is_module_name { my($self,$module) = @_; # Module names start with a letter. # End with an alphanumeric. # The rest is an alphanumeric or :: $module =~ s/\b::\b//g; return $module =~ /^[a-zA-Z]\w*$/ ? 1 : 0; } sub _eval { my($self,$code) = @_; my( $sigdie, $eval_result, $eval_error ); { local( $@, $!, $SIG{__DIE__} ); # isolate eval $eval_result = eval $code; $eval_error = $@; $sigdie = $SIG{__DIE__} || undef; } # make sure that $code got a chance to set $SIG{__DIE__} $SIG{__DIE__} = $sigdie if defined $sigdie; return( $eval_result, $eval_error ); } ############################################################################### # OK/IS/ISNT METHODS ############################################################################### sub ok { my($self,@args) = @_; $main::TI_NUM++; my($op,@ret) = $self->_ok_result(@args); my($name,@diag); my $ok = 1; if ($op eq 'skip') { my $reason = shift(@ret); $self->_skip($reason); } elsif ($op eq 'pass') { ($name,@diag) = @ret; $self->_ok($name); } else { ($name,@diag) = @ret; $self->_not_ok($name); $ok = 0; } return if ( ($ok && $$self{'quiet'}) || (! 
$ok && $$self{'quiet'} == 2) ); foreach my $diag (@diag) { $self->_diag($diag); } } sub _ok_result { my($self,@args) = @_; # Test if we're skipping this test my($skip,$reason) = $self->_skip_test(); return ('skip',$reason) if ($skip); # No args == always pass if (@args == 0) { return ('pass','Empty test'); } # Get the result my($func,$funcargs,$result) = $self->_get_result(\@args); # Get name/expected my($name,$expected); if (@args == 1) { $name = $args[0]; } elsif (@args == 2) { ($expected,$name) = @args; } elsif (@args > 2) { return(0,'','Improperly formed test: too many arguments'); } # Check the result my($pass,@diag) = $self->_cmp_result('ok',$func,$funcargs,$result,$expected); return($pass,$name,@diag); } sub is { my($self,@args) = @_; $self->_is("is",@args); } sub isnt { my($self,@args) = @_; $self->_is("isnt",@args); } sub _is { my($self,$is,@args) = @_; $main::TI_NUM++; my($op,@ret) = $self->_is_result($is,@args); my($name,@diag); my $ok = 1; if ($op eq 'skip') { my $reason = shift(@ret); $self->_skip($reason); } elsif ($op eq 'pass') { ($name,@diag) = @ret; $self->_ok($name); } else { ($name,@diag) = @ret; $self->_not_ok($name); $ok = 0; } return if ( ($ok && $$self{'quiet'}) || (! $ok && $$self{'quiet'} == 2) ); foreach my $diag (@diag) { $self->_diag($diag); } } sub _is_result { my($self,$is,@args) = @_; # Test if we're skipping this test my($skip,$reason) = $self->_skip_test(); return ('skip',$reason) if ($skip); # Test args if (@args < 2) { return ('fail','','Improperly formed test: too few arguments'); } my($func,$funcargs,$result) = $self->_get_result(\@args); my($name,$expected); if (@args == 1) { ($expected) = @args; } elsif (@args == 2) { ($expected,$name) = @args; } else { return(0,'','Improperly formed test: too many arguments'); } # Check the result my($pass,@diag) = $self->_cmp_result($is,$func,$funcargs,$result,$expected); return($pass,$name,@diag); } # Returns $func,$args and $results. 
The first two are returned only if # there is a function. # sub _get_result { my($self,$args) = @_; my($func,@funcargs,@result,$result); if (ref($$args[0]) eq 'CODE') { $func = shift(@$args); if (ref($$args[0]) eq 'ARRAY') { @funcargs = @{ $$args[0] }; shift(@$args); } @result = &$func(@funcargs); return ($func,\@funcargs,\@result); } elsif (ref($$args[0]) eq 'ARRAY') { @result = @{ $$args[0] }; shift(@$args); return ('','',\@result); } else { $result = shift(@$args); return ('','',$result); } } sub _cmp_result { my($self,$type,$func,$funcargs,$result,$expected) = @_; my $pass = 0; my $identical = 0; my @diag; if ($type eq 'ok') { if (ref($result) eq 'ARRAY') { foreach my $ele (@$result) { $pass = 1 if (defined($ele)); } } elsif (ref($result) eq 'HASH') { foreach my $key (keys %$result) { my $val = $$result{$key}; $pass = 1 if (defined($val)); } } else { $pass = ($result ? 1 : 0); } if (! defined($expected)) { # If no expected result passed in, we don't test the results $identical = 1; } else { # Results/expected must be the same structure $identical = $self->_cmp_structure($result,$expected); } } else { $identical = $self->_cmp_structure($result,$expected); if ($type eq 'is') { $pass = $identical; } else { $pass = 1 - $identical; } } if (! $identical && $type ne 'isnt') { if ($func) { push(@diag,"Arguments: " . $self->_stringify($funcargs)); } push(@diag, "Results : " . $self->_stringify($result)); push(@diag, "Expected : " . $self->_stringify($expected)) unless ($type eq 'ok' && ! defined($result)); } return (($pass ? 'pass' : 'fail'),@diag); } # Turn a data structure into a string (poor-man's Data::Dumper) sub _stringify { my($self,$s) = @_; my($str) = $self->__stringify($s); my($width) = $$self{'width'}; if ($width) { $width -= 21; # The leading string $width = 10 if ($width < 10); $str = substr($str,0,$width) if (length($str)>$width); } return $str; } sub __stringify { my($self,$s) = @_; if (! 
defined($s)) { return '__undef__'; } elsif (ref($s) eq 'ARRAY') { my $str = '[ '; foreach my $val (@$s) { $str .= $self->__stringify($val) . ' '; } $str .= ']'; return $str; } elsif (ref($s) eq 'HASH') { my $str = '{ '; foreach my $key (sort keys %$s) { my $key = $self->__stringify($key); my $val = $self->__stringify($$s{$key}); $str .= "$key=>$val "; } $str .= '}'; return $str; } elsif (ref($s)) { return '<' . ref($s) . '>'; } elsif ($s eq '') { return "''"; } else { if ($s =~ /\s/) { my $q = qr/\'/; # single quote my $qq = qr/\"/; # double quote if ($s !~ $q) { return "'$s'"; } if ($s !~ $qq) { return '"' . $s . '"'; } return "<$s>"; } else { return $s; } } } sub _cmp_structure { my($self,$s1,$s2) = @_; return 1 if (! defined($s1) && ! defined($s2)); # undef = undef return 0 if (! defined($s1) || ! defined($s2)); # undef != def return 0 if (ref($s1) ne ref($s2)); # must be same type if (ref($s1) eq 'ARRAY') { return 0 if ($#$s1 != $#$s2); # two lists must be the same length foreach (my $i=0; $i<=$#$s1; $i++) { return 0 unless $self->_cmp_structure($$s1[$i],$$s2[$i]); } return 1; } elsif (ref($s1) eq 'HASH') { my @k1 = keys %$s1; my @k2 = keys %$s2; return 0 if ($#k1 != $#k2); # two hashes must be the same length foreach my $key (@k1) { return 0 if (! exists $$s2{$key}); # keys must be the same return 0 unless $self->_cmp_structure($$s1{$key},$$s2{$key}); } return 1; } elsif (ref($s1)) { # Two references (other than ARRAY and HASH are assumed equal. 
return 1; } else { # Two scalars are compared stringwise return ($s1 eq $s2); } } sub _skip_test { my($self) = @_; if ($$self{'skipall'}) { return (1,$$self{'skipall'}); } elsif ( $main::TI_NUM < $$self{'start'} || ($$self{'end'} && $main::TI_NUM > $$self{'end'}) ) { return (1,'Test not in list of tests specified to run'); } return 0; } ############################################################################### # FILE METHOD ############################################################################### sub file { my($self,$func,$input,$outputdir,$expected,$name,@args) = @_; $name = "" if (! $name); if (! ref($func) eq 'CODE') { $self->_die("file method required a coderef"); } my @funcargs; my $testdir = $$self{'testdir'}; # Input file if ($input) { if (-r $input) { # Nothing } elsif (-r "$testdir/$input") { $input = "$testdir/$input"; } else { $self->_die("Input file not readable: $input"); } push(@funcargs,$input); } # Output file and directory if (! $outputdir) { if (-d $testdir && -w $testdir) { $outputdir = $testdir; } else { $outputdir = "."; } } if ($outputdir) { if (! -d $outputdir || ! -w $outputdir) { $self->_die("Output directory not writable: $outputdir"); } } my $output = "$outputdir/tmp_test_inter"; push(@funcargs,$output); # Expected output if (! $expected) { $self->_die("Expected output file not specified"); } elsif (-r $expected) { # Nothing } elsif (-r "$testdir/$expected") { $expected = "$testdir/$expected"; } else { $self->_die("Expected output file not readable: $expected"); } # Create the temporary output file. &$func(@funcargs,@args); if (! -r "$output") { $self->_die("Output file not created"); } # Test each line my $in = new IO::File; $in->open($output); my @out = <$in>; $in->close(); chomp(@out); $in->open($expected); my @exp = <$in>; $in->close(); chomp(@exp); unlink($output) if (! 
$ENV{'TI_NOCLEAN'}); while (@out < @exp) { push(@out,''); } while (@exp < @out) { push(@exp,''); } for (my $i=0; $i<@out; $i++) { my $line = $i+1; my $n = ($name ? "$name : Line $line" : "Line $line"); $self->_is('is',$out[$i],$exp[$i],$n); } } ############################################################################### # TESTS METHOD ############################################################################### sub tests { my($self,%opts) = @_; # # feature => [ FEATURE, FEATURE, ... ] # disable => [ FEATURE, FEATURE, ... ] # my $skip = ''; if (exists $opts{'feature'}) { foreach my $feature (@{ $opts{'feature'} }) { $skip = "Required feature unavailable: $feature", last if (! exists $$self{'features'}{$feature}); } } if (exists $opts{'disable'} && ! $skip) { foreach my $feature (@{ $opts{'disable'} }) { $skip = "Disabled due to feature being available: $feature", last if (exists $$self{'features'}{$feature}); } } # # name => NAME # skip => REASON # todo => 0/1 # my $name = ''; if (exists $opts{'name'}) { $name = $opts{'name'}; } if (exists $opts{'skip'}) { $skip = $opts{'skip'}; } my $todo = 0; if (exists $opts{'todo'}) { $todo = $opts{'todo'}; } # # tests => STRING OR LISTREF # func => CODEREF # expected => STRING OR LISTREF # # tests if (! 
exists $opts{'tests'}) { $self->_die("invalid test format: tests required"); } my $tests = $opts{'tests'}; my(%tests,$gotexpected); my($ntest,$nexp); if (ref($tests) eq 'ARRAY') { my @results = @$tests; $ntest = 0; foreach my $result (@results) { $ntest++; $tests{$ntest}{'err'} = 0; if (ref($result) eq 'ARRAY') { $tests{$ntest}{'args'} = $result; } else { $tests{$ntest}{'args'} = [$result]; } } $gotexpected = 0; } else { ($ntest,$gotexpected,%tests) = $self->_parse($tests); $nexp = $ntest if ($gotexpected); } # expected if (exists $opts{'expected'}) { if ($gotexpected) { $self->_die("invalid test format: expected results included twice"); } my $expected = $opts{'expected'}; if (ref($expected) eq 'ARRAY') { my @exp = @$expected; $nexp = 0; foreach my $exp (@exp) { $nexp++; if (ref($exp) eq 'ARRAY') { $tests{$nexp}{'expected'} = $exp; } else { $tests{$nexp}{'expected'} = [$exp]; } } } else { my($g,%t); ($nexp,$g,%t) = $self->_parse($expected); if ($g) { $self->_die("invalid test format: expected results contain '=>'"); } foreach my $t (1..$nexp) { $tests{$t}{'expected'} = $t{$t}{'args'}; } } $gotexpected = 1; } if ($gotexpected && ($nexp != 1 && $nexp != $ntest)) { $self->_die("invalid test format: number expected results differs from number of tests"); } # func my $func; if (exists $opts{'func'}) { $func = $opts{'func'}; if (ref($func) ne 'CODE') { $self->_die("invalid test format: func must be a code reference"); } } # # Compare results # foreach my $t (1..$ntest) { $main::TI_NUM++; if ($skip) { $self->_skip($skip,$name); next; } if ($tests{$t}{'err'}) { $self->_not_ok($name); $self->diag($tests{$t}{'err'}); next; } my($op,@ret); # Test results if ($gotexpected) { # Do an 'is' test my @a = ('is'); push(@a,$func) if ($func); push(@a,$tests{$t}{'args'}); push(@a,($nexp == 1 ? 
$tests{'1'}{'expected'} : $tests{$t}{'expected'})); push(@a,$name); ($op,@ret) = $self->_is_result(@a); } else { # Do an 'ok' test my $result = $tests{$t}{'args'}; if (@$result == 1) { $result = $$result[0]; } ($op,@ret) = $self->_ok_result($result,$name); } # Print it out my($name,@diag); my $ok = 1; if ($op eq 'skip') { my $reason = shift(@ret); $self->_skip($reason); } elsif ($op eq 'pass') { ($name,@diag) = @ret; $self->_ok($name); } else { ($name,@diag) = @ret; $self->_not_ok($name); $ok = 0; } next if ( ($ok && $$self{'quiet'}) || (! $ok && $$self{'quiet'} == 2) ); foreach my $diag (@diag) { $self->_diag($diag); } } } ############################################################################### # TAP METHODS ############################################################################### sub _diag { my($self,$message) = @_; print '#' . ' 'x10 . "$message\n"; } sub _plan { my($self,$n,$reason) = @_; $reason = '' if (! $reason); if ($$self{'mode'} eq 'test') { # Test mode if (! $n) { $reason = '' if (! $reason); print "1..0 # Skipped $reason\n"; return; } print "1..$n\n"; } else { if (! $n) { print " All tests skipped: $reason\n"; } else { print " Epected number of tests: $n\n" unless ($$self{'quiet'}); } } } sub _ok { my($self,$name) = @_; $name = '' if (! $name); $name =~ s/\#//; $$self{'testsrun'} = 1; return if ($$self{'mode'} ne 'test' && $$self{'quiet'}); print "ok $main::TI_NUM" . ' 'x(8-length($main::TI_NUM)) . "$name\n"; if ($name =~ /^\d/ && $$self{'quiet'} != 2) { $self->_diag('It is strongly recommended that the name of a test not'); $self->_diag('begin with a digit so it will not be confused with the'); $self->_diag('test number.'); } } sub _not_ok { my($self,$name) = @_; $name = '' if (! $name); $name =~ s/\#//; $$self{'testsrun'} = 1; print "not ok $main::TI_NUM" . ' 'x(4-length($main::TI_NUM)) . 
"$name\n"; if ($$self{'abort'} == 2) { exit 1; } elsif ($$self{'abort'}) { $$self{'skipall'} = 'Tests aborted due to failed test'; } } sub _skip { my($self,$reason,$name) = @_; $name = '' if (! $name); $name =~ s/\#//; $$self{'testsrun'} = 1; return if ($$self{'mode'} ne 'test' && $$self{'quiet'}); print "ok $main::TI_NUM" . ' 'x(8-length($main::TI_NUM)) . ($name ? "$name " : "") . "# SKIPPED $reason\n"; } ############################################################################### # TEST PARSING METHODS ############################################################################### { my $l; # current line number my $sp_opt = qr/\s*/; # optional whitespace my $sp = qr/\s+/; # required whitespace my $lparen = qr/\(/; # opening paren my $lbrack = qr/\[/; # opening brack my $lbrace = qr/\{/; # opening brace my $q = qr/\'/; # single quote my $qq = qr/\"/; # double quote my $token = qr/\S+/; # a token of non-whitespace characters my $min_str = qr/.*?/; # a minimum length string my $results = qr/=>/; # the string to switch to results # We'll also need to match delimiters and other special characters that # signal the end of a token. The default delimiter is just whitespace, # both other end-of-token regular expressions will include closing # parens, delimiters, etc. # # The end-of-token regexp will return a match for a special character (if # any) that terminates the token. If a token ends a whitespace or EOL, # nothing is matched. # my $eot = qr/()(?:\s+|$)/; # Allowed delimiters is anything except () [] {} alphanumeric, # underscore, and whitespace. # my $delim = qr/[^\'\"\(\)\[\]\{\}a-zA-Z0-9_ \t]/; # This takes a string which may contain a partial or complete # descritpion of any number of tests, and parses it. # # The string is multiline, and tests must be separated from each other # by one or more blank lines. Lines starting with a pound sign (#) # are comments. # # A test may include arguments (or obtained results), expected results, # or both. 
# # Returns # ($n,$gotboth,%tests) # where # $n is the number of tests # $gotboth is 1 if both arguments and expected results are obtained # $tests{$i} is the i'th test. # sub _parse { my($self,$string) = @_; my $t = 0; my $gotboth = -1; my %tests = (); # Split on newlines $string = [ split(/\n/s,$string) ]; $t = 0; while (@$string) { my $test = $self->_next_test($string); last if (! @$test); # All tests must contain both args/results OR only one of them. my ($err,$both,$args,$results) = $self->_parse_test($test); if ($gotboth == -1) { $gotboth = $both; } elsif ($gotboth != $both) { $err = "Malformed test [$l]: expected results for some tests, not others"; } $t++; $tests{$t}{'err'} = $err; $tests{$t}{'args'} = $args; $tests{$t}{'expected'} = $results if ($gotboth); } return ($t,$gotboth,%tests); } # Get all lines up to the end of lines or a blank line. Both # signal the end of a test. # sub _next_test { my($self,$list) = @_; my @test; my $started = 0; while (1) { last if (! @$list); my $line = shift(@$list); $line =~ s/^\s*//; $line =~ s/\s*$//; # If it's a blank line, add it to the test. If we've # already done test lines, then this signals the end # of the test. Otherwise, this is before the test, # so keep looking. if ($line eq '') { push(@test,$line); next if (! $started); last; } # Comments are added to the test as a blank line. if ($line =~ /^#/) { push(@test,''); next; } push(@test,$line); $started = 1; } return [] if (! $started); return \@test; } # Parse an entire test. Look for arguments, =>, and expected results. # sub _parse_test { my($self,$test) = @_; my($err,$both,@args,@results); my $curr = 'args'; while (@$test) { last if (! $self->_test_line($test)); # Check for '=>' if ($self->_parse_begin_results($test)) { if ($curr eq 'args') { $curr = 'results'; } else { return ("Malformed test [$l]: '=>' found twice"); } next; } # Get the next item(s) to add. 
my($err,$match,@val) = $self->_parse_token($test,$eot); return ($err) if ($err); if ($curr eq 'args') { push(@args,@val); } else { push(@results,@val); } } $both = ($curr eq 'results' ? 1 : 0); return ("",$both,\@args,\@results); } # Makes sure that the first line in the test contains # something. Blank lines are ignored. # sub _test_line { my($self,$test) = @_; while (@$test && (! defined($$test[0]) || $$test[0] eq '')) { shift(@$test); $l++; next; } return 1 if (@$test); return 0; } # Check for '=>'. # # Return 1 if found, 0 otherwise. # sub _parse_begin_results { my($self,$test) = @_; return 1 if ($$test[0] =~ s/^ $sp_opt $results $eot //x); return 0; } # Gets the next item to add to the current list. # # Returns ($err,$match,@val) where $match is the character that # matched the end of the current element (either a delimiter, # closing character, or nothing if the element ends on # whitespace/newline). # sub _parse_token { my($self,$test,$EOT) = @_; my($err,$found,$match,@val); { last if (! $self->_test_line($test)); # Check for quoted ($err,$found,$match,@val) = $self->_parse_quoted($test,$EOT); last if ($err); if ($found) { # '' remains '' last; } # Check for open ($err,$found,$match,@val) = $self->_parse_open_close($test,$EOT,$lparen,')'); last if ($err); if ($found) { # () is an empty list if (@val == 1 && $val[0] eq '') { @val = (); } last; } ($err,$found,$match,@val) = $self->_parse_open_close($test,$EOT,$lbrack,']'); last if ($err); if ($found) { # [] is [] if (@val == 1 && $val[0] eq '') { @val = ([]); } else { @val = ( [@val] ); } last; } ($err,$found,$match,@val) = $self->_parse_open_close($test,$EOT,$lbrace,'}'); last if ($err); if ($found) { if (@val == 1 && $val[0] eq '') { @val = ( {} ); } elsif (@val % 2 == 0) { # Even number of elements @val = ( {@val} ); } elsif (! defined $val[$#val] || $val[$#val] eq '') { # Odd number of elements with nothing as the # last element. 
pop(@val); @val = ( {@val} ); } else { # Odd number of elements not supported for a hash $err = "Malformed test [$l]: hash with odd number of elements"; } last; } # Check for some other token ($err,$found,$match,@val) = $self->_parse_simple_token($test,$EOT); last if ($err); last; } return ($err) if ($err); return ("Malformed test: unable to parse") if (! $found); foreach my $v (@val) { $v = '' if ($v eq '__blank__'); $v = undef if ($v eq '__undef__'); $v =~ s/__nl__/\n/g if ($v); } return (0,$match,@val) if ($found); return (0,0); } ### ### The next few routines parse parts of the test. Each of them ### take as arguments: ### ### $test : the listref containing the unparsed portion of ### the test ### $EOT : the end of a token ### ### + other args as needed. ### ### They all return: ### ### $err : a string containing the error (if any) ### $found : 1 if something matched ### $match : the character which terminates the current ### token signaling the start of the next token ### (this will either be a delimiter, a closing ### character, or nothing if the string ended on ### whitespace or a newline) ### @val : the value (or values) of the token ### # Check for a quoted string # 'STRING' # "STRING" # The string must be on one line, and everything up to the # closing quote is included (the quotes themselves are # stripped). # sub _parse_quoted { my($self,$test,$EOT) = @_; if ($$test[0] =~ s/^ $sp_opt $q ($min_str) $q $EOT//x || $$test[0] =~ s/^ $sp_opt $qq ($min_str) $qq $EOT//x) { return (0,1,$2,$1); } elsif ($$test[0] =~ /^ $sp_opt $q/x || $$test[0] =~ /^ $sp_opt $qq/x) { return ("Malformed test [$l]: improper quoting"); } return (0,0); } # Parses an open/close section. # # ( TOKEN TOKEN ... ) # (, TOKEN, TOKEN, ... ) # # $open is a regular expression matching the open, $close is the # actual closing character. # # After the closing character must be an $EOT. 
# sub _parse_open_close { my($self,$test,$EOT,$open,$close) = @_; # See if there is an open my($del,$newEOT); if ($$test[0] =~ s/^ $sp_opt $open ($delim) $sp_opt //x) { $del = $1; $newEOT = qr/ $sp_opt ($|\Q$del\E|\Q$close\E) /x; } elsif ($$test[0] =~ s/^ $sp_opt $open $sp_opt //x) { $del = ''; $newEOT = qr/ ($sp_opt $|$sp_opt \Q$close\E|$sp) /x; } else { return (0,0); } # If there was, then we need to read tokens until either: # the string is all used up => error # $close is found my($match,@val); while (1) { # Get a token. We MUST find something valid even if it is # an empty list followed by the closing character. my($e,$m,@v) = $self->_parse_token($test,$newEOT); return ($e) if ($e); $m =~ s/^$sp//; # If we ended on nothing, and $del is something, then we # ended on a newline with no delimiter. The next line MUST # start with a delimiter or close character or the test is # invalid. if (! $m && $del) { if (! $self->_test_line($test)) { return ("Malformed test [$l]: premature end of test"); } if ($$test[0] =~ s/^ $sp_opt $newEOT //x) { $m = $1; } else { return ("Malformed test [$l]: unexpected token (expected '$close' or '$del')"); } } # Figure out what value(s) were returned if ($m eq $close && ! @v) { push(@val,''); } else { push(@val,@v); } last if ($m eq $close); } # Now we need to find out what character ends this token: if ($$test[0] eq '') { # Ended at EOL return (0,1,'',@val); } if ($$test[0] =~ s/^ $sp_opt $EOT //x) { return (0,1,$1,@val); } else { return ("Malformed test [$l]: unexpected token"); } } # Checks for a simple token. # sub _parse_simple_token { my($self,$test,$EOT) = @_; $$test[0] =~ s/^ $sp_opt (.*?) 
$EOT//x; return (0,1,$2,$1); } } 1; # Local Variables: # mode: cperl # indent-tabs-mode: nil # cperl-indent-level: 3 # cperl-continued-statement-offset: 2 # cperl-continued-brace-offset: 0 # cperl-brace-offset: 0 # cperl-brace-imaginary-offset: 0 # cperl-label-offset: 0 # End: Test-Inter-1.09/lib/Test/Inter.pod0000644000175000001440000011615413442235233016273 0ustar sulbeckusers# Copyright (c) 2010-2019 Sullivan Beck. All rights reserved. # This program is free software; you can redistribute it and/or modify it # under the same terms as Perl itself. =pod =head1 NAME Test::Inter - framework for more readable interactive test scripts =head1 DESCRIPTION This is another framework for writing test scripts. Much of the syntax is loosely inspired by L, and Test::Inter has most of it's functionality, but it is not a drop-in replacement. L (and other existing test frameworks) suffer from two weaknesses, both of which have prevented me from ever using them: None offer the ability to access specific tests in a reasonably interactive fashion, primarily for debugging purposes None offer the ability to write the tests in whatever format would make the tests the most readable The way I write and use test scripts, existing Test::* modules are not nearly as useful as they could be. Test scripts written using L work fine when running as part of the test suite, but debugging an individual test requires extra steps, and the tests themselves are not as readable as they should be. =head1 INTERACTIVE EXECUTION One requirement that I have of a test framework is the ability to interact with it. I do most of my debugging using test scripts. When I find a bug, I write a test case for it (typically by adding it to an existing test script) and then debug it using the test script. Then I leave the test there to ensure that the bug won't come back (hopefully). 
Since I use test scripts in a very interactive way (often in the debugger), I want to be able to do the following trivially: =over 4 =item B I'd like to be able to run only a single test, or a subset of tests. =item B Setting a breakpoint in the debugger to run up to the start of the Nth test is one of the most common tasks I want to do when I'm debugging a failed test. =back To illustrate the first point, in L, a series of tests might be specified in a test script as shown in the following example (line numbers added for convenience): ... 100: # test 1 101: $result = func("apples","bushels"); 102: is($result, "enough"); 103: 104: # test 2 105: $result = func("grapefruit","tons"); 106: is($result, "enough"); 107: 108: # test 3 109: $result = func("oranges","boatloads"); 110: is($result, "insufficient"); 111: 112: # tests 4-6 113: foreach my $arg (qw(pears plums pineapple)) { 114: $result = func($arg,"boxes"); 115: is($result, "enough"); 116: } ... Say you ran the test suite, and test 3 failed. To debug it you have to open up the test script, find the 3rd test, and set the appropriate breakpoint. In this case, you'll want to break at line 109. None of these steps are impossible of course, but it will take some time to get it right. It becomes harder when there are lots of tests (imagine that you want to test the 117th test instead of the 3rd test) or when tests are wrapped up in loops, embedded in subroutines, or other similar situations. As an example, what if it's the 5th test that fails in the example above. Now the break point will be a conditional one, so you have to figure out not only the line, but the condition the appropriate state during that test. In this case, you need to stop at line 114 when $arg is 'plums'. Wouldn't it be far better to set a break point in func when the Nth test is reached? With Test::Inter, you can. 
So for the above script, the debugger commands that you would use to debug the 3rd test are: Test::More : b 109 Test::Inter: b func ($::TI_NUM==3) and the 5th test are: Test::More : b 114 ($arg eq 'plums') Test::Inter: b func ($::TI_NUM==5) It would also be nice to be able to skip the first two tests... perhaps they take a long time to run, and I want to get right to work on test 3. You can do this easily too by setting the $::TI_START variable. There are some other variables that can be used to specify which test or tests to run described in the L section below. The other thing I want to do when I run the test scripts interactively is to see more information which will assist in debugging a failed test. This can be controlled with variables such as TI_QUIET, TI_MODE, and TI_WIDTH described below in the L section. =head1 READABLE TESTS The other feature that I wanted in a test suite is the ability to define the tests in a format that is natural and readable FOR THE TESTS. In almost every case, it is best to think of a test script as consisting of two separate parts: a script part, and a test part. The script part of a test script is the least important part! It's usually fairly trivial, rarely needs to be changed, and is not the focus of the test script. The tests part of the script IS the important part, and these should be expressed in a form that is natural to them, easy to maintain, easy to read, and easy to modify, and none of these should involve modifying the script portion of the test script in general. Because the content of the tests is the important part of the script, the emphasis should be in making them more readable, even at the expense of the script portion. As a general rule, if the script portion of the test script obscures the tests in any way, it's not written correctly! The solution to this is well understood, and is common to many other systems where you are mixing two "languages". 
The task of correctly specifying both the tests and the test script is virtually identical to the task of creating a PHP script which consists of a mixture of PHP and HTML, or the task of creating a template file using some templating system where the file consists of a mixture of text to be displayed and templating commands. It is well understood in each of these cases that the more the two "languages" are interwoven, the less readable both are, and the harder it is to maintain. The more you are able to separate the two, the easier both are to read and maintain. As often as possible, I want the tests to be written in some sort of text format which can be easily viewed and modified (usually as a simple table) with no perl commands interspersed. I want to the freedom to define the tests in one section (a long string, the DATA section, or even in a separate file) which is easily readable. This may introduce the necessity of parsing it, but it makes it significantly easier to maintain the tests. This flexibility makes it much easier to read the tests (as opposed to the script) which is the fundamental content of a test script. Looking again at the example test script, you can see that there is far too much perl interspersed with the tests. It's difficult to read the tests individually in this script because there is too much perl code among them, and virtually impossible to look at them as a whole. It is true that looking at this particular example, it is very simple... but the script ISN'T the content you're interested in (and bear in mind that many test scripts are nowhere near this simple). The REAL content of this script are the tests, which consist of the function arguments and the expected result. Although it's not impossible to see each of these in the script above, it's not in a format that is conducive to studying the tests, and especially not for examining the list of tests as a whole. 
Now, look at an alternate way of specifying the tests using this module: $tests = " apples bushels => enough grapefruit tons => enough oranges boatloads => insufficient pears boxes => enough plums boxes => enough pineapple boxes => enough "; $o->tests(tests => $tests, func => \&func); Here, it's easy to see the list of tests, and adding additional tests is a breeze. =head1 CREATING A TEST This module supports a number of methods for defining tests, so you can use whichever one is most convenient (including methods that are identical to L if that really is the best method). Every test may have several pieces of information: =over 4 =item B Every test is automatically assigned a number, but it may be useful to specify a name of a test (which is actually a short description of the test). Whenever a test result is reported, the name will be given (if one was specified). The name may not have a '#' in it. The name is completely optional, but makes the results more readable. =item B In order to test something, you need to know what result was expected (or in some cases, what result was NOT expected). =item B You also need to know the results that you're comparing to the expected results. This can be obtained by simply working with a set of results, or a function name and a set of arguments to pass to it. =item B It is useful to be able to specify state information at the start of the test suite (for example, to see if certain features are available), and some tests may only run if those conditions are met. If no conditions are set for a test, it will always run. =item B Some tests may be marked as 'todo' tests. These are test which are allowed to fail (meaning that they have been put in place for an as-yet unimplemented feature). Since it is expected that the test will fail, the test suite will still pass, even if these tests fail. 
The tests will still run and if they pass, a message is issued saying that the feature is now implemented, and the tests should be graduated to non-todo state. =back =head1 BASE METHODS =over 4 =item B $o = new Test::Inter [$name] [%options]; This creates a new test framework. There are several options which may be used to specify which tests are run, how they are run, and what output is given. The entire test script can be named by passing in $name. All options can be set in four different ways. First, you can pass in a hash of B VAL> pairs in the new method. So, to set the B option, the C<%options>) hash would contain: start => VALUE Second, you can set an environment variable. This will override any value passed in the first way. The environment variable is named TI_XXX where XXX is the fully capitalized option. So: $ENV{TI_START} = VALUE The third method, which overrides the previous two, is to set a global variable. It is also named TI_XXX in the main namespace, so to set it this way, set: $::TI_START = VALUE The final method is to call one of the methods below and these override all other methods. Each of the allowed options are described below in the following base methods: start end testnum plan abort quiet mode skip_all width use_lib =item B $o->version(); Returns the version of the module. =item B $o->encoding($encoding); C<$encoding> is any value that can be passed as an encoding to perl's Encode::decode function. Use this if your test strings contain characters in other encodings. =item B $o = new Test::Inter 'start' => $N; $o->start($N) To define which test you want to start with, set the B option as described in the B method above. When the start test is defined, most tests numbered less than N are completely ignored. If the tests are being run quietly (see the B method below), nothing is printed out for these tests. Otherwise, a skip message is printed out. One class of tests IS still executed. 
Tests run using the B or B (to test the loading of modules) are still run. If no value (or a value of 0) is used, tests run from the first test. =item B $o = new Test::Inter 'end' => $M; $o->end($M); To define which test you want to end with, set the B option as described in the B method above. When the end test is defined, all tests numbered more than M are completely ignored. If the tests are being run quietly (see the quiet method below), nothing is printed out for these tests. Otherwise, a skip message is printed out. If no value is given, it defaults to 0 (which means that all remaining tests are run). =item B $o = new Test::Inter 'testnum' => $N; $o->testnum($N); To run only a single test, set the B option as described in the B method above. It is equivalent to setting both the start and end tests to $N. =item B =item B $o = new Test::Inter 'plan' => $N; $o->plan($n); $o->done_testing(); $o->done_testing($n); The TAP API (the 'language' used to run a sequence of tests and see which ones failed and which ones passed) requires a statement of the number of tests that are expected to run. This statement can appear at the start of the test suite, or at the end. If you know in advance how many tests should run in the test script, you can set the B option as described in the B method above to the number of tests. If you know how many tests should run at the end of the test script, you can pass in a non-zero integer to the done_testing method. Frequently, you don't really care how many tests are in the script (especially if new tests are added on a regular basis). In this case, you still need to include a statement that says that the number of tests expected is however many were run. To do this, call the done_testing method with no argument. NOTE: if the plan method is used, it MUST be used before any tests are run (including those that test the loading of modules). If the done_testing method is used, it MUST be called after all tests are run. 
You must specify a plan or use a done_testing statement, but you cannot do both. It is NOT strictly required to set a plan if the script is only run interactively, so if for some reason this module is used for test scripts which are not part of a standard perl test suite, the plan and done_testing statements are optional. As a matter of fact, the script will run just fine without them... but a perl installer will report a failure in the test suite. =item B $o = new Test::Inter 'abort' => 0/1/2; $o->abort(0/1/2); To define how you want a failure to be treated, set the B option as described in the B method above. The B option can take a value of 0, 1, or 2. If this is set to 1, the test script will run unmodified until a test fails. At that point, all remaining tests will be skipped. If it is set to 2, the test script will run until a test fails at which point it will exit with an error code of 1. With a value of 0, failed tests will be reported, but the script will continue. In both cases, todo tests will NOT trigger the abort behavior. =item B $o = new Test::Inter 'quiet' => 0/1/2; $o->quiet(0/1/2); To define how you want failures to be reported, set the B option as described in the B method above. The B option can take a value of 0, 1, or 2. If this is set to 0 (the default), all information will be printed out. If it is set to 1, some optional information will not be printed. If it is set to 2, all optional information will not be printed. =item B $o = new Test::Inter 'mode' => MODE; $o->mode(MODE); Test::Inter scripts can be run in either an interactive mode, or as part of a test suite with different behaviors. To select the mode, set the B option as described in the B method above. The B option can take a value of 'inter' or 'test'. When run in test mode, it prints out the results using the TAP grammar (i.e. 'ok 1', 'not ok 3', etc.). When run in interactive mode, it prints out results in a more human readable format. 
=item B $o = new Test::Inter 'width' => WIDTH; $o->width(WIDTH); The width option can be set as described in the B method above. WIDTH is the width of the terminal (for printing out failed test information). It defaults to 80, but it can be set to any width (and lines longer then this are truncated). If WIDTH is set to 0, no truncation is done. =item B $o = new Test::Inter 'use_lib' => VALUE; $o->use_lib(VALUE); $o->use_lib(); By default, the library included in the module distribution will be added to the search path for modules, so a 'use MODULE' line should find the version stored in this module distribution. If VALUE is set to 'off', the search path will not be modified automatically. You may add the library path at a later time by calling: $o->use_lib('on'); $o->use_lib(); Note: both calls must be used. The first sets the option, the second actually modifies the search path. =item B $o = new Test::Inter 'skip_all' => REASON; $o->skip_all(REASON); The skip_all option can be set as described in the B method above. If this is set, the entire test script will be skipped for the reason given. This must be done before any test is run, and before any plan number is set. The skip_all can also be called at any point during the script (i.e. after tests have been run). In this case, all remaining scripts will be skipped. $o->skip_all(REASON,FEATURE,FEATURE,...); $o->skip_all('',FEATURE,FEATURE,...); This will skip all tests (or all remaining tests) unless all s are available. B can be entered as an empty string and the reason the tests are skipped will be a message about the missing feature. =item B $o->feature($feature,$val); This defines a feature. If C<$val> is non-zero, the feature is available. Otherwise it is not. =item B =item B $o->diag($message); $o->note($message); Both of these print an optional message. Messages printed with the C method are always optional and will be omitted if the B option is set to 1 or 2. 
Messages printed with the C method are optional and will not be printed if the B option is set to 2, but they will be printed if the B method is set to 1. =item B $o->testdir(); $o->testdir('mod'); $o->testdir('lib'); Occasionally, it may be necessary to know the directory where Test::Inter gets some of its information. By default, the directory containing the tests will be returned, but if the optional argument 'mod' is included, it will return the path to the module distribution (which should include both a lib and t subdirectory). If the argument 'lib' is included, it will return the directory where the libraries are stored. =back =head1 METHODS FOR LOADING MODULES Test scripts can load other modules (using either the perl C or C commands). There are three different modes for doing this which determine how this is done. =over 4 =item B By default, this is used to test for a module that is required for all tests in the test script. Loading the module is treated as an actual test in the test suite. The test is to determine whether the module is available and can be loaded. If it can be loaded, it is, and it is reported as a successful test. If it cannot be loaded, it is reported as a failed test. In the event of a failed test, all remaining tests will be skipped automatically (except for other tests which load modules). =item B In feature mode, loading the module is not treated as a test (i.e. it will not print out an 'ok' or 'not ok' line). Instead, it will set a feature (named the same as the module) which can be used to determine whether other tests should run or not. =item B In a few very rare cases, we may want to test for a module but expect that it not be present. This is the exact opposite of the B mode. Successfully loading the module is treated as a test failure. In the event of a failure, all remaining tests will be skipped. 
=back The methods available are: =over 4 =item B $o->require_ok($module [,$mode]); This is used to load a module using the perl C function. If C<$mode> is not passed in, the default mode (B) is used to test the existence of the module. If C<$mode> is passed in, it must be either the string 'forbid' or 'feature'. If C<$mode> is 'feature', a feature named C<$module> is set if the module was able to be loaded. =item B $o->use_ok(@args [,$mode]); This is used to load a module with C, or check a perl version. BEGIN { $o->use_ok('5.010'); } BEGIN { $o->use_ok('Some::Module'); } BEGIN { $o->use_ok('Some::Module',2.05); } BEGIN { $o->use_ok('Some::Module','foo','bar'); } BEGIN { $o->use_ok('Some::Module',2.05,'foo','bar'); } are the same as: use 5.010; use Some::Module; use Some::Module 2.05; use Some::Module qw(foo bar); use Some::Module 2.05 qw(foo bar); Putting the B call in a BEGIN block allows the functions to be imported at compile-time and prototypes are properly honored. You'll also need to load the Test::Inter module, and create the object in a BEGIN block. C<$mode> acts the same as in the B method. =back =head1 METHODS FOR RUNNING TEST There are several methods for running tests. The B, B, and B methods are included for those already comfortable with L and wishing to stick with the same format of test script. The B method is the suggested method though since it makes use of the full power of this module. =over 4 =item B $o->ok(TESTS); A test run with ok looks at a result, and if it evaluates to 0 (or false), it fails. If it evaluates to non-zero (or true), it passes. These tests do not require you to specify the expected results. If expected results are given, they will be compared against the result received, and if they differ, a diagnostic message will be printed, but the test will still succeed or fail based only on the actual result produced. These tests require a single result and either zero or one expected results. 
To run a single test, use any of the following: $o->ok(); # always succeeds $o->ok($result); $o->ok($result,$name); $o->ok($result,$expected,$name); $o->ok(\&func); $o->ok(\&func,$name); $o->ok(\&func,$expected,$name); $o->ok(\&func,\@args); $o->ok(\&func,\@args,$name); $o->ok(\&func,\@args,$expected,$name); If C<$result> is a scalar, the test passes if C<$result> is true. If C<$result> is a list reference, the test succeeds if the list contains any defined values. If C<$result> is a hash reference, the test succeeds if the hash contains any key with a value that is not C. If C<\&func> and C<\@args> are passed in, then C<$result> is generated by passing C<@args> to C<&func> and behaves identically to the calls where C<$result> is passed in. If C<\&func> is passed in but no arguments, the function takes no arguments, but still produces a result. If an expected value is passed in and the result does not match it, a diagnostic warning will be printed, even if the test passes. =item B =item B $o->is(TESTS); $o->isnt(TESTS); A test run with B looks at a result and tests to see if it is identical to an expected result. If it is, the test passes. Otherwise it fails. In the case of a failure, a diagnostic message will show what result was actually obtained and what was expected. A test run with B looks at a result and tests to see if the result obtained is different than an expected result. If it is different, the test passes. Otherwise it fails. The is method can be called in any of the following ways: $o->is($result,$expected); $o->is($result,$expected,$name); $o->is(\&func,$expected); $o->is(\&func,$expected,$name); $o->is(\&func,\@args,$expected); $o->is(\&func,\@args,$expected,$name); The B method can be called in exactly the same way. As with the B method, the result can be a scalar, hashref, or listref. If it is a hashref or listref, the entire structure must match the expected value. =item B $o->tests($opt=>$val, $opt=>$val, ...) 
The options available are described in the following section. =item B $o->file($func,$input,$outputdir,$expected,$name [,@args]); Sometimes it may be easiest to store the input, output, and expected output from a test in a text file. In this case, each line of output will be treated as a single test, so the output and expected output must match up exactly. C<$func> is a reference to a function which will produce a temporary output file. If C<$input> is specified, it is the name of the input file. If it is empty, no input file will be used. The input file can be fully specified, or it can be relative to the test directory. If C<$outputdir> is passed in, it is the directory where the output file will be written. It can be fully specified, or relative to the test directory. If C<$outputdir> is left blank, the temporary file will be written to the test directory. C<$expected> is the name of a file which contains the expected output. It can be fully specified, or it will be checked for in the test directory. C<$name> is the name of this series of tests. C<@args> are extra arguments to pass to the test function. The function will be called with the arguments: &$func( [$input,] $output,@args); C<$input> is only passed in if it was passed in to this method. If no input file is specified, nothing will be passed to the function. C<$output> is the name of a temporary file where the output will be written to. =back =head1 USING THE TESTS METHOD It is expected that most tests (except for those that load a module) will be run using the tests method called as: $o->tests(%options); The following options are available: =over 4 =item B name => NAME This sets the name of this set of tests. All tests will be given the same name. =item B =item B =item B In order to specify a series of tests, you have to specify either a function and a list of arguments, or a list of results. 
Specifying the function and list of arguments can be done using the pair: func => \&FUNCTION tests => TESTS If the B option is not set, B contains a list of results. A list of expected results may also be given. They can be included in the tests => TESTS option or included separately as: expected => RESULTS The way to specify these are covered in the next section SPECIFYING THE TESTS. =item B =item B feature => [FEATURE1, FEATURE2, ...] disable => [FEATURE1, FEATURE2, ...] The default set of tests to run is determined using the start, end, and skip_all methods discussed above. Using those methods, a list of tests is obtained, and it is expected that these will run. The feature and disable options modify the list. If the feature option is included, the tests given in this call will only run if ALL of the features listed are available. If the disable option is included, the tests will be run unless ANY of the features listed are available. =item B skip => REASON Skip these tests for the reason given. =item B todo => 0/1 Setting this to 1 says that these tests are allowed to fail. They represent a feature that is not yet implemented. If the tests succeed, a message will be printed notifying the developer that the tests are now ready to promote to actual use. =back =head1 SPECIFYING THE TESTS A series of tests can be specified in two different ways. The tests can be written in a very simple string format, or stored as a list. Demonstrating how this can be done is best done by example, so let's say that there is a function (func) which takes two arguments, and returns a single value. Let's say that the expected output (and the actual output) from 3 different sets of arguments is: Input Expected Output Actual Output ----- --------------- ------------- 1,2 a a 3,4 b x 5,6 c c (so in this case, the first and third tests pass, but the 2nd one will fail). 
Specifying these tests as lists could be done as: $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => [ [a], [b], [c] ], ); Here, the tests are stored as a list, and each element in the list is a listref containing the set of arguments. If the B option is not passed in, the tests option is set to a list of results to compare with the expected results, so the following is equivalent to the above: $o->tests( tests => [ [a], [x], [c] ], expected => [ [a], [b], [c] ], ); If an argument (or actual result) or an expected result is only a single value, it can be entered as a scalar instead of a list ref, so the following is also equivalent: $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => [ a, b, [c] ], ); The only exception to this is if the single value is itself a list reference. In this case it MUST be included as a reference. In other words, if you have a single test, and the expected value for this test is a list reference, it must be passed in as: expected => [ [ \@r ] ] NOT as: expected => [ \@r ] Passing in a set of expected results is optional. If none are passed in, the tests are treated as if they had been passed to the B method (i.e. if they return something true, they pass, otherwise they fail). The second way to specify tests is as a string. The string is a multi-line string with each tests being separate from the next test by a blank line. Comments (lines which begin with '#') are allowed, and are ignored. Whitespace at the start and end of the line is ignored. The string may contain the results directly, or results may be passed in separately. 
For example, the following all give the same sets of tests as the example above: $o->tests( func => &func, tests => " # Test 1 1 2 => a # Test 2 3 4 => b 5 6 => c ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => [ [a], [b], [c] ] ); $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => " a b c ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => " a b c ", ); The expected results may also consist of only a single set of results (in this case, it must be passed in as a listref). In this case, all of the tests are expected to have the same results. So, the following are equivalent: $o->tests( func => &func, tests => " 1 2 => a b 3 4 => a b 5 6 => a b ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => [ [a, b] ], ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => "a b", ); The number of expected values must either be 1 (i.e. all of the tests are expected to produce the same value) or exactly the same number as the number of tests. The parser is actually quite powerful, and can handle multi-line tests, quoted strings, and nested data structures. The test may be split across any number of lines, provided there is not a completely blank line (which signals the end of the test), so the following are equivalent: tests => "a b c", tests => "a b c", Arguments (or expected results) may include data structures. For example, the following are equivalent: tests => "[ a b ] { a 1 b 2 }" tests => [ [ [a,b], { a=>1, b=>2 } ] ] Whitespace is mostly optional, but there is one exception. An item must end with some kind of delimiter, so the following will fail: tests => "[a b][c d]" The first element (the list ref [a b]) must be separated from the second element by the delimiter (which is whitespace in this case), so it must be written as: tests => "[a b] [c d]" As already demonstrated, hashrefs and listrefs may be included and nested. 
Elements may also be included inside parens, but this is optional since all arguments and expected results are already treated as lists, so the following are equivalent: tests => "a b c" tests => "(a b) c" Although parens are optional, they may make things more readable, and allow you to use something other than whitespace as the delimiter. Since parens are actually ignored, a string '()' is also ignored, so do not use empty parentheses. If the character immediately following the opening paren, brace, or bracket is a punctuation mark, then it is used as the delimiter instead of whitespace. For example, the following are all equivalent: [ a b c ] [a b c] [, a,b,c ] [, a, b, c ] A delimiter is a single character, and the following may not be used as a delimiter: any opening/closing characters () [] {} single or double quotes alphanumeric characters underscore Whitespace (including newlines) around the delimiter is ignored, so the following is valid: [, a, b, c ] Two delimiters next to each other or a trailing delimiter produce an empty string. "(,a,b,)" => (a, b, '') "(,a,,b)" => (a, '', b) Hashrefs may be specified by braces and the following are equivalent: { a 1 b 2 } {, a,1,b,2 } {, a,1,b,2, } Note that a trailing delimiter is ignored if there are already an even number of elements, or an empty string otherwise. Nested structures are allowed: "[ [1 2] [3 4] ]" For example, $o->tests( func => \&func, tests => "a [ b c ] { d 1 e 2 } => x y" ); is equivalent to: $o->tests( func => \&func, tests => [ [a, [b,c], {d=>1,e=>2}] ], expected => [ [x,y] ], ); Any single value can be surrounded by single or double quotes in order to include the delimiter. So: "(, a,'b,c',e )" is equivalent to: "( a b,c e )" Any single value can be the string '__undef__' which will be turned into an actual undef. If the value is '__blank__' it is turned into an empty string (''), though it can also be specified as '' directly. 
Any value can have an embedded newline by including a __nl__ in the value, but the value must be written on a single line. Expected results are separated from arguments by ' => '. =head1 TEST::INTER VARIABLES To summarize the information above, the following variables are used by Test::Inter. Each variable can be set in two different ways: as an environment variable and as a perl variable in the main namespace. For example, the TI_END variable can be set as: $::TI_END $ENV{TI_END} The following variables can be used to define which tests are run: =over 4 =item TI_START Set this to define the test you want to start with. Example: If you have a perl test script and you want to start running it at test 12, run the following shell commands: TI_START=12 ./my_test_script.t =item TI_END Set this to define the test you want to end with. =item TI_TESTNUM Set this to run only a single test =back There is also a variable TI_NUM (available only as $::TI_NUM) which is set automatically by Test::Inter to be the test currently being run. The following variables control what is output from the tests, and how it is formatted: =over 4 =item TI_QUIET How verbose the test script is. Values are 0 (most verbose) to 2 (least verbose). =item TI_MODE How the output is formatted. Values are 'inter' (interactive mode) or 'test' (test suite mode). Interactive mode is easier to read. Test mode is for running as part of a test suite. =item TI_WIDTH The width of the terminal. =back The following variables control how some tests are run: =over 4 =item TI_NOCLEAN When running a file test, the temporary output file will not be removed if this is set. =back =head1 HISTORY The history of this module dates back to 1996 when I needed to write a test suite for my Date::Manip module. At that time, none of the Test::* modules currently available in CPAN existed (the earliest ones didn't come along until 1998), so I was left completely on my own writing my test scripts. 
I wrote a very basic version of my test framework which allowed me to write all of the tests as a string, it would parse the string, count the tests, and then run them. Over the years, the functionality I wanted grew, and periodically, I'd go back and reexamine other Test frameworks (primarily Test::More) to see if I could replace my framework with an existing module... and I've always found them wanting, and chosen to extend my existing framework instead. As I've written other modules, I've wanted to use the framework in them too, so I've always just copied it in, but this is obviously tedious and error prone. I'm not sure why it took me so long... but in 2010, I finally decided it was time to rework the framework in a module form. I loosely based my module on Test::More. I like the functionality of that module, and wanted most of it (and I plan on adding more in future versions). So this module uses some similar syntax to Test::More (though it allows a great deal more flexibility in how the tests are specified). One thing to note is that I may have been able to write this module as an extension to Test::More, but after looking into that possibility, I decided that it would be faster to not do that. I did "borrow" a couple of routines from it (though they've been modified quite heavily) as a starting point for a few of the functions in this module, and I thank the authors of Test::More for their work. =head1 KNOWN BUGS AND LIMITATIONS None known. =head1 SEE ALSO L - the 'industry standard' of perl test frameworks =head1 BUGS AND QUESTIONS If you find a bug in Test::Inter, there are three ways to send it to me. Any of them are fine, so use the method that is easiest for you. =over 4 =item Direct email You are welcome to send it directly to me by email. The email address to use is: sbeck@cpan.org. =item CPAN Bug Tracking You can submit it using the CPAN tracking too. This can be done at the following URL: L =item GitHub You can submit it as an issue on GitHub. 
This can be done at the following URL: L =back Please do not use other means to report bugs (such as forums for a specific OS or Linux distribution) as it is impossible for me to keep up with all of them. When filing a bug report, please include the following information: =over 4 =item B Please include the version of Test::Inter you are using. You can get this by using the script: use Test::Inter; print $Test::Inter::VERSION,"\n"; =back If you want to report missing or incorrect codes, you must be running the most recent version of Test::Inter. If you find any problems with the documentation (errors, typos, or items that are not clear), please send them to me. I welcome any suggestions that will allow me to improve the documentation. =head1 LICENSE This script is free software; you can redistribute it and/or modify it under the same terms as Perl itself. =head1 AUTHOR Sullivan Beck (sbeck@cpan.org) =cut Test-Inter-1.09/Changes0000644000175000001440000000246413442510452014311 0ustar sulbeckusersWritten by: Sullivan Beck (sbeck@cpan.org) Any suggestions, bug reports, or donations :-) should be sent to me. 1.09 2019-04-13 - Added test directory to directories added in the use_lib method automatically. - Added functionality to the testdir method 1.08 2019-03-12 - Minor improvement to a test. - Added the use_lib method and automatically use the 'lib' directory. - Some improvements to the POD documentation. 1.07 2018-03-15 - Some improvements to the POD documentation. - Some changes to get better test coverage. - Better handling of empty structures (i.e [] and {}). 1.06 2015-02-11 - Fixed some typos. RT 88523. - Changed ChangeLog file to new Changes specification. - Documentation improvments - Put distro on GitHub. Gabor Szabo - Fixed Module::Build requirement. RT 102059. 1.05 2013-03-20 - Fixed the Build.PL/Makefile.PL scripts to not install the example scripts. RT 84091 1.04 2013-03-18 - The __nl__ substitution wasn't happening. 
1.03 2011-06-28 - Missed one of the tests that fail with perl 5.015. 1.02 2011-06-23 - Added 'width' method. - Updated some tests which did not run correctly with perl 5.015. Renee Baecker 1.01 2010-04-29 - Use File::Basename and IO::File to get rid of two unix dependencies. 1.00 2010-04-29 - Initial release. Test-Inter-1.09/LICENSE0000644000175000001440000000015513442510557014024 0ustar sulbeckusersThis module is free software; you can redistribute it and/or modify it under the same terms as Perl itself. Test-Inter-1.09/Makefile.PL0000644000175000001440000000433213442510557014772 0ustar sulbeckusers###################################################################### # Makefile.PL for Test::Inter ###################################################################### use strict; use warnings; use 5.006; use ExtUtils::MakeMaker 6.30; my %IsWin32 = map { $_ => 1 } qw( MSWin32 NetWare symbian ); my $IsWin32 = 1 if ($IsWin32{ $^O }); my $Pvers = $]; my $ConvVers= $ExtUtils::MakeMaker::VERSION; my %module_prereq = ( 'Cwd' => '0', 'File::Basename' => '0', 'IO::File' => '0', ); my %script_prereq = ( ); my %test_prereq = ( 'File::Find::Rule' => '0', 'Test::More' => '0', 'Test::Pod' => '1.00', 'Test::Pod::Coverage' => '1.00', ); my %config_prereq = ( 'ExtUtils::MakeMaker' => '6.30', ); %module_prereq = (%module_prereq,%script_prereq); my $EU_MM_V = $ExtUtils::MakeMaker::VERSION; $EU_MM_V =~ s/_//g; if ($EU_MM_V < 6.64) { %config_prereq = (%config_prereq,%test_prereq); } WriteMakefile( NAME => "Test::Inter", VERSION => "1.09", ABSTRACT => "framework for more readable interactive test scripts", LICENSE => "perl", ($EU_MM_V >= 6.48 ? (MIN_PERL_VERSION => "5.006") : ()), AUTHOR => "Sullivan Beck (sbeck\@cpan.org)", "dist" => {COMPRESS=>"gzip",SUFFIX=>"gz"}, PL_FILES => {}, PREREQ_PM => \%module_prereq, ($EU_MM_V >= 6.52 ? (CONFIGURE_REQUIRES => \%config_prereq) : ()), ($EU_MM_V >= 6.64 ? (TEST_REQUIRES => \%test_prereq) : ()), ($EU_MM_V > 6.45 ? 
(META_ADD => { provides => { 'Test::Inter' => { file => 'lib/Test/Inter.pm', version => '1.09', }, }, } ) : ()), ($EU_MM_V > 6.46 ? (META_MERGE => { 'meta-spec' => { version => 2, }, resources => { repository => { type => 'git', web => 'https://github.com/SBECK-github/Test-Inter', url => 'git://github.com/SBECK-github/Test-Inter.git', }, }, } ) : ()), ); Test-Inter-1.09/META.yml0000644000175000001440000000153513442510560014265 0ustar sulbeckusers--- abstract: 'framework for more readable interactive test scripts' author: - 'Sullivan Beck (sbeck@cpan.org)' build_requires: ExtUtils::MakeMaker: '0' File::Find::Rule: '0' Test::More: '0' Test::Pod: '1.00' Test::Pod::Coverage: '1.00' configure_requires: ExtUtils::MakeMaker: '6.30' dynamic_config: 1 generated_by: 'ExtUtils::MakeMaker version 7.34, CPAN::Meta::Converter version 2.150010' license: perl meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: '1.4' name: Test-Inter no_index: directory: - t - inc provides: Test::Inter: file: lib/Test/Inter.pm version: '1.09' requires: Cwd: '0' File::Basename: '0' IO::File: '0' perl: '5.006' resources: repository: git://github.com/SBECK-github/Test-Inter.git version: '1.09' x_serialization_backend: 'CPAN::Meta::YAML version 0.018' Test-Inter-1.09/README0000644000175000001440000012533413442510560013700 0ustar sulbeckusersNAME Test::Inter - framework for more readable interactive test scripts DESCRIPTION This is another framework for writing test scripts. Much of the syntax is loosely inspired by Test::More, and Test::Inter has most of it's functionality, but it is not a drop-in replacement. 
Test::More (and other existing test frameworks) suffer from two weaknesses, both of which have prevented me from ever using them: None offer the ability to access specific tests in a reasonably interactive fashion, primarily for debugging purposes None offer the ability to write the tests in whatever format would make the tests the most readable The way I write and use test scripts, existing Test::* modules are not nearly as useful as they could be. Test scripts written using Test::More work fine when running as part of the test suite, but debugging an individual test requires extra steps, and the tests themselves are not as readable as they should be. INTERACTIVE EXECUTION One requirement that I have of a test framework is the ability to interact with it. I do most of my debugging using test scripts. When I find a bug, I write a test case for it (typically by adding it to an existing test script) and then debug it using the test script. Then I leave the test there to ensure that the bug won't come back (hopefully). Since I use test scripts in a very interactive way (often in the debugger), I want to be able to do the following trivially: Easy access to a specific test or tests I'd like to be able to run only a single test, or a subset of tests. Easily set breakpoints in the debugger Setting a breakpoint in the debugger to run up to the start of the Nth test is one of the most common tasks I want to do when I'm debugging a failed test. To illustrate the first point, in Test::More, a series of tests might be specified in a test script as shown in the following example (line numbers added for convenience): ... 
100: # test 1 101: $result = func("apples","bushels"); 102: is($result, "enough"); 103: 104: # test 2 105: $result = func("grapefruit","tons"); 106: is($result, "enough"); 107: 108: # test 3 109: $result = func("oranges","boatloads"); 110: is($result, "insufficient"); 111: 112: # tests 4-6 113: foreach my $arg (qw(pears plums pineapple)) { 114: $result = func($arg,"boxes"); 115: is($result, "enough"); 116: } ... Say you ran the test suite, and test 3 failed. To debug it you have to open up the test script, find the 3rd test, and set the appropriate breakpoint. In this case, you'll want to break at line 109. None of these steps are impossible of course, but it will take some time to get it right. It becomes harder when there are lots of tests (imagine that you want to test the 117th test instead of the 3rd test) or when tests are wrapped up in loops, embedded in subroutines, or other similar situations. As an example, what if it's the 5th test that fails in the example above. Now the break point will be a conditional one, so you have to figure out not only the line, but the condition the appropriate state during that test. In this case, you need to stop at line 114 when $arg is 'plums'. Wouldn't it be far better to set a break point in func when the Nth test is reached? With Test::Inter, you can. So for the above script, the debugger commands that you would use to debug the 3rd test are: Test::More : b 109 Test::Inter: b func ($::TI_NUM==3) and the 5th test are: Test::More : b 114 ($arg eq 'plums') Test::Inter: b func ($::TI_NUM==5) It would also be nice to be able to skip the first two tests... perhaps they take a long time to run, and I want to get right to work on test 3. You can do this easily too by setting the $::TI_START variable. There are some other variables that can be used to specify which test or tests to run described in the "TEST::INTER VARIABLES" section below. 
The other thing I want to do when I run the test scripts interactively is to see more information which will assist in debugging a failed test. This can be controlled with variables such as TI_QUIET, TI_MODE, and TI_WIDTH described below in the "TEST::INTER VARIABLES" section. READABLE TESTS The other feature that I wanted in a test suite is the ability to define the tests in a format that is natural and readable FOR THE TESTS. In almost every case, it is best to think of a test script as consisting of two separate parts: a script part, and a test part. The script part of a test script is the least important part! It's usually fairly trivial, rarely needs to be changed, and is not the focus of the test script. The tests part of the script IS the important part, and these should be expressed in a form that is natural to them, easy to maintain, easy to read, and easy to modify, and none of these should involve modifying the script portion of the test script in general. Because the content of the tests is the important part of the script, the emphasis should be in making them more readable, even at the expense of the script portion. As a general rule, if the script portion of the test script obscures the tests in any way, it's not written correctly! The solution to this is well understood, and is common to many other systems where you are mixing two "languages". The task of correctly specifying both the tests and the test script is virtually identical to the task of creating a PHP script which consists of a mixture of PHP and HTML, or the task of creating a template file using some templating system where the file consists of a mixture of text to be displayed and templating commands. It is well understood in each of these cases that the more the two "languages" are interwoven, the less readable both are, and the harder it is to maintain. The more you are able to separate the two, the easier both are to read and maintain. 
As often as possible, I want the tests to be written in some sort of text format which can be easily viewed and modified (usually as a simple table) with no perl commands interspersed. I want to the freedom to define the tests in one section (a long string, the DATA section, or even in a separate file) which is easily readable. This may introduce the necessity of parsing it, but it makes it significantly easier to maintain the tests. This flexibility makes it much easier to read the tests (as opposed to the script) which is the fundamental content of a test script. Looking again at the example test script, you can see that there is far too much perl interspersed with the tests. It's difficult to read the tests individually in this script because there is too much perl code among them, and virtually impossible to look at them as a whole. It is true that looking at this particular example, it is very simple... but the script ISN'T the content you're interested in (and bear in mind that many test scripts are nowhere near this simple). The REAL content of this script are the tests, which consist of the function arguments and the expected result. Although it's not impossible to see each of these in the script above, it's not in a format that is conducive to studying the tests, and especially not for examining the list of tests as a whole. Now, look at an alternate way of specifying the tests using this module: $tests = " apples bushels => enough grapefruit tons => enough oranges boatloads => insufficient pears boxes => enough plums boxes => enough pineapple boxes => enough "; $o->tests(tests => $tests, func => \&func); Here, it's easy to see the list of tests, and adding additional tests is a breeze. CREATING A TEST This module supports a number of methods for defining tests, so you can use whichever one is most convenient (including methods that are identical to Test::More if that really is the best method). 
Every test may have several pieces of information: A name Every test is automatically assigned a number, but it may be useful to specify a name of a test (which is actually a short description of the test). Whenever a test result is reported, the name will be given (if one was specified). The name may not have a '#' in it. The name is completely optional, but makes the results more readable. An expected result In order to test something, you need to know what result was expected (or in some cases, what result was NOT expected). A function and arguments OR a result You also need to know the results that you're comparing to the expected results. This can be obtained by simply working with a set of results, or a function name and a set of arguments to pass to it. Conditions It is useful to be able to specify state information at the start of the test suite (for example, to see if certain features are available), and some tests may only run if those conditions are met. If no conditions are set for a test, it will always run. Todo tests Some tests may be marked as 'todo' tests. These are test which are allowed to fail (meaning that they have been put in place for an as-yet unimplemented feature). Since it is expected that the test will fail, the test suite will still pass, even if these tests fail. The tests will still run and if they pass, a message is issued saying that the feature is now implemented, and the tests should be graduated to non-todo state. BASE METHODS new $o = new Test::Inter [$name] [%options]; This creates a new test framework. There are several options which may be used to specify which tests are run, how they are run, and what output is given. The entire test script can be named by passing in $name. All options can be set in four different ways. First, you can pass in a hash of OPT = VAL> pairs in the new method. So, to set the start option, the %options) hash would contain: start => VALUE Second, you can set an environment variable. 
This will override any value passed in the first way. The environment variable is named TI_XXX where XXX is the fully capitalized option. So: $ENV{TI_START} = VALUE The third method, which overrides the previous two, is to set a global variable. It is also named TI_XXX in the main namespace, so to set it this way, set: $::TI_START = VALUE The final method is to call one of the methods below and these override all other methods. Each of the allowed options are described below in the following base methods: start end testnum plan abort quiet mode skip_all width use_lib version $o->version(); Returns the version of the module. encoding $o->encoding($encoding); $encoding is any value that can be passed as an encoding to perl's Encode::decode function. Use this if your test strings contain characters in other encodings. start $o = new Test::Inter 'start' => $N; $o->start($N) To define which test you want to start with, set the start option as described in the new method above. When the start test is defined, most tests numbered less than N are completely ignored. If the tests are being run quietly (see the quiet method below), nothing is printed out for these tests. Otherwise, a skip message is printed out. One class of tests IS still executed. Tests run using the require_ok or use_ok methods (to test the loading of modules) are still run. If no value (or a value of 0) is used, tests run from the first test. end $o = new Test::Inter 'end' => $M; $o->end($M); To define which test you want to end with, set the end option as described in the new method above. When the end test is defined, all tests numbered more than M are completely ignored. If the tests are being run quietly (see the quiet method below), nothing is printed out for these tests. Otherwise, a skip message is printed out. If no value is given, it defaults to 0 (which means that all remaining tests are run). 
testnum $o = new Test::Inter 'testnum' => $N; $o->testnum($N); To run only a single test, set the testnum option as described in the new method above. It is equivalent to setting both the start and end tests to $N. plan done_testing $o = new Test::Inter 'plan' => $N; $o->plan($n); $o->done_testing(); $o->done_testing($n); The TAP API (the 'language' used to run a sequence of tests and see which ones failed and which ones passed) requires a statement of the number of tests that are expected to run. This statement can appear at the start of the test suite, or at the end. If you know in advance how many tests should run in the test script, you can set the plan option as described in the new method above to the number of tests. If you know how many tests should run at the end of the test script, you can pass in a non-zero integer to the done_testing method. Frequently, you don't really care how many tests are in the script (especially if new tests are added on a regular basis). In this case, you still need to include a statement that says that the number of tests expected is however many were run. To do this, call the done_testing method with no argument. NOTE: if the plan method is used, it MUST be used before any tests are run (including those that test the loading of modules). If the done_testing method is used, it MUST be called after all tests are run. You must specify a plan or use a done_testing statement, but you cannot do both. It is NOT strictly required to set a plan if the script is only run interactively, so if for some reason this module is used for test scripts which are not part of a standard perl test suite, the plan and done_testing statements are optional. As a matter of fact, the script will run just fine without them... but a perl installer will report a failure in the test suite. abort $o = new Test::Inter 'abort' => 0/1/2; $o->abort(0/1/2); To define how you want a failure to be treated, set the abort option as described in the new method above. 
The abort option can take a value of 0, 1, or 2. If this is set to 1, the test script will run unmodified until a test fails. At that point, all remaining tests will be skipped. If it is set to 2, the test script will run until a test fails at which point it will exit with an error code of 1. With a value of 0, failed tests will be reported, but the script will continue. In both cases, todo tests will NOT trigger the abort behavior. quiet $o = new Test::Inter 'quiet' => 0/1/2; $o->quiet(0/1/2); To define how you want failures to be reported, set the quiet option as described in the new method above. The quiet option can take a value of 0, 1, or 2. If this is set to 0 (the default), all information will be printed out. If it is set to 1, some optional information will not be printed. If it is set to 2, all optional information will not be printed. mode $o = new Test::Inter 'mode' => MODE; $o->mode(MODE); Test::Inter scripts can be run in either an interactive mode, or as part of a test suite with different behaviors. To select the mode, set the mode option as described in the new method above. The mode option can take a value of 'inter' or 'test'. When run in test mode, it prints out the results using the TAP grammar (i.e. 'ok 1', 'not ok 3', etc.). When run in interactive mode, it prints out results in a more human readable format. width $o = new Test::Inter 'width' => WIDTH; $o->width(WIDTH); The width option can be set as described in the new method above. WIDTH is the width of the terminal (for printing out failed test information). It defaults to 80, but it can be set to any width (and lines longer then this are truncated). If WIDTH is set to 0, no truncation is done. use_lib $o = new Test::Inter 'use_lib' => VALUE; $o->use_lib(VALUE); $o->use_lib(); By default, the library included in the module distribution will be added to the search path for modules, so a 'use MODULE' line should find the version stored in this module distribution. 
If VALUE is set to 'off', the search path will not be modified automatically. You may add the library path at a later time by calling: $o->use_lib('on'); $o->use_lib(); Note: both calls must be used. The first sets the option, the second actually modifies the search path. skip_all $o = new Test::Inter 'skip_all' => REASON; $o->skip_all(REASON); The skip_all option can be set as described in the new method above. If this is set, the entire test script will be skipped for the reason given. This must be done before any test is run, and before any plan number is set. The skip_all can also be called at any point during the script (i.e. after tests have been run). In this case, all remaining scripts will be skipped. $o->skip_all(REASON,FEATURE,FEATURE,...); $o->skip_all('',FEATURE,FEATURE,...); This will skip all tests (or all remaining tests) unless all s are available. REASON can be entered as an empty string and the reason the tests are skipped will be a message about the missing feature. feature $o->feature($feature,$val); This defines a feature. If $val is non-zero, the feature is available. Otherwise it is not. diag note $o->diag($message); $o->note($message); Both of these print an optional message. Messages printed with the "note" method are always optional and will be omitted if the quiet option is set to 1 or 2. Messages printed with the "diag" method are optional and will not be printed if the quiet option is set to 2, but they will be printed if the quiet method is set to 1. testdir $o->testdir(); $o->testdir('mod'); $o->testdir('lib'); Occasionally, it may be necessary to know the directory where Test::Inter gets some of it's information. By default, the directory containing the tests will be returned, but if the optional argument 'mod' is included, it will return the path to the module distribution (which should include both a lib and t subdirerctory). If the argument 'lib' is included, it will return the directory where the libraries are stored. 
METHODS FOR LOADING MODULES Test scripts can load other modules (using either the perl "use" or "require" commands). There are three different modes for doing this which determine how this is done. required By default, this is used to test for a module that is required for all tests in the test script. Loading the module is treated as an actual test in the test suite. The test is to determine whether the module is available and can be loaded. If it can be loaded, it is, and it is reported as a successful test. If it cannot be loaded, it is reported as a failed test. In the result of a failed test, all remaining tests will be skipped automatically (except for other tests which load modules). feature In feature mode, loading the module is not treated as a test (i.e. it will not print out an 'ok' or 'not ok' line. Instead, it will set a feature (named the same as the module) which can be used to determine whether other tests should run or not. forbid In a few very rare cases, we may want to test for a module but expect that it not be present. This is the exact opposite of the required mode. Successfully loading the module is treated as a test failure. In the event of a failure, all remaining tests will be skipped. The methods available are: require_ok $o->require_ok($module [,$mode]); This is used to load a module using the perl "require" function. If $mode is not passed in, the default mode (required) is used to test the existence of the module. If $mode is passed in, it must be either the string 'forbid' or 'feature'. If $mode is 'feature', a feature named $module is set if the module was able to be loaded. use_ok $o->use_ok(@args [,$mode]); This is used to load a module with "use", or check a perl version. 
BEGIN { $o->use_ok('5.010'); } BEGIN { $o->use_ok('Some::Module'); } BEGIN { $o->use_ok('Some::Module',2.05); } BEGIN { $o->use_ok('Some::Module','foo','bar'); } BEGIN { $o->use_ok('Some::Module',2.05,'foo','bar'); } are the same as: use 5.010; use Some::Module; use Some::Module 2.05; use Some::Module qw(foo bar); use Some::Module 2.05 qw(foo bar); Putting the use_ok call in a BEGIN block allows the functions to be imported at compile-time and prototypes are properly honored. You'll also need to load the Test::Inter module, and create the object in a BEGIN block. $mode acts the same as in the require_ok method. METHODS FOR RUNNING TEST There are several methods for running tests. The ok, is, and isnt methods are included for those already comfortable with Test::More and wishing to stick with the same format of test script. The tests method is the suggested method though since it makes use of the full power of this module. ok $o->ok(TESTS); A test run with ok looks at a result, and if it evaluates to 0 (or false), it fails. If it evaluates to non-zero (or true), it passes. These tests do not require you to specify the expected results. If expected results are given, they will be compared against the result received, and if they differ, a diagnostic message will be printed, but the test will still succeed or fail based only on the actual result produced. These tests require a single result and either zero or one expected results. To run a single test, use any of the following: $o->ok(); # always succeeds $o->ok($result); $o->ok($result,$name); $o->ok($result,$expected,$name); $o->ok(\&func); $o->ok(\&func,$name); $o->ok(\&func,$expected,$name); $o->ok(\&func,\@args); $o->ok(\&func,\@args,$name); $o->ok(\&func,\@args,$expected,$name); If $result is a scalar, the test passes if $result is true. If $result is a list reference, the test succeeds if the list contains any defined values. 
If $result is a hash reference, the test succeeds if the hash contains any key with a value that is not "undef". If "\&func" and "\@args" are passed in, then $result is generated by passing @args to &func and behaves identically to the calls where $result is passed in. If "\&func" is passed in but no arguments, the function takes no arguments, but still produces a result. If an expected value is passed in and the result does not match it, a diagnostic warning will be printed, even if the test passes. is isnt $o->is(TESTS); $o->isnt(TESTS); A test run with is looks at a result and tests to see if it is identical to an expected result. If it is, the test passes. Otherwise it fails. In the case of a failure, a diagnostic message will show what result was actually obtained and what was expected. A test run with isnt looks at a result and tests to see if the result obtained is different than an expected result. If it is different, the test passes. Otherwise it fails. The is method can be called in any of the following ways: $o->is($result,$expected); $o->is($result,$expected,$name); $o->is(\&func,$expected); $o->is(\&func,$expected,$name); $o->is(\&func,\@args,$expected); $o->is(\&func,\@args,$expected,$name); The isnt method can be called in exactly the same way. As with the ok method, the result can be a scalar, hashref, or listref. If it is a hashref or listref, the entire structure must match the expected value. tests $o->tests($opt=>$val, $opt=>$val, ...) The options available are described in the following section. file $o->file($func,$input,$outputdir,$expected,$name [,@args]); Sometimes it may be easiest to store the input, output, and expected output from a test in a text file. In this case, each line of output will be treated as a single test, so the output and expected output must match up exactly. $func is a reference to a function which will produce a temporary output file. If $input is specified, it is the name of the input file. 
If it is empty, no input file will be used. The input file can be fully specified, or it can be relative to the test directory. If $outputdir is passed in, it is the directory where the output file will be written. It can be fully specified, or relative to the test directory. If $outputdir is left blank, the temporary file will be written to the test directory. $expected is the name of a file which contains the expected output. It can be fully specified, or it will be checked for in the test directory. $name is the name of this series of tests. @args are extra arguments to pass to the test function. The function will be called with the arguments: &$func( [$input,] $output,@args); $input is only passed in if it was passed in to this method. If no input file is specified, nothing will be passed to the function. $output is the name of a temporary file where the output will be written to. USING THE TESTS METHOD It is expected that most tests (except for those that load a module) will be run using the tests method called as: $o->tests(%options); The following options are available: name name => NAME This sets the name of this set of tests. All tests will be given the same name. tests func expected In order to specify a series of tests, you have to specify either a function and a list of arguments, or a list of results. Specifying the function and list of arguments can be done using the pair: func => \&FUNCTION tests => TESTS If the func option is not set, tests contains a list of results. A list of expected results may also be given. They can be included in the tests => TESTS option or included separately as: expected => RESULTS The way to specify these are covered in the next section SPECIFYING THE TESTS. feature disable feature => [FEATURE1, FEATURE2, ...] disable => [FEATURE1, FEATURE2, ...] The default set of tests to run is determined using the start, end, and skip_all methods discussed above. 
Using those methods, a list of tests is obtained, and it is expected that these will run. The feature and disable options modify the list. If the feature option is included, the tests given in this call will only run if ALL of the features listed are available. If the disable option is included, the tests will be run unless ANY of the features listed are available. skip skip => REASON Skip these tests for the reason given. todo todo => 0/1 Setting this to 1 says that these tests are allowed to fail. They represent a feature that is not yet implemented. If the tests succeed, a message will be printed notifying the developer that the tests are now ready to promote to actual use. SPECIFYING THE TESTS A series of tests can be specified in two different ways. The tests can be written in a very simple string format, or stored as a list. Demonstrating how this can be done is best done by example, so let's say that there is a function (func) which takes two arguments, and returns a single value. Let's say that the expected output (and the actual output) from 3 different sets of arguments is: Input Expected Output Actual Output ----- --------------- ------------- 1,2 a a 3,4 b x 5,6 c c (so in this case, the first and third tests pass, but the 2nd one will fail). Specifying these tests as lists could be done as: $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => [ [a], [b], [c] ], ); Here, the tests are stored as a list, and each element in the list is a listref containing the set of arguments. 
If the func option is not passed in, the tests option is set to a list of results to compare with the expected results, so the following is equivalent to the above: $o->tests( tests => [ [a], [x], [c] ], expected => [ [a], [b], [c] ], ); If an argument (or actual result) or an expected result is only a single value, it can be entered as a scalar instead of a list ref, so the following is also equivalent: $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => [ a, b, [c] ], ); The only exception to this is if the single value is itself a list reference. In this case it MUST be included as a reference. In other words, if you have a single test, and the expected value for this test is a list reference, it must be passed in as: expected => [ [ \@r ] ] NOT as: expected => [ \@r ] Passing in a set of expected results is optional. If none are passed in, the tests are treated as if they had been passed to the ok method (i.e. if they return something true, they pass, otherwise they fail). The second way to specify tests is as a string. The string is a multi-line string with each tests being separate from the next test by a blank line. Comments (lines which begin with '#') are allowed, and are ignored. Whitespace at the start and end of the line is ignored. The string may contain the results directly, or results may be passed in separately. For example, the following all give the same sets of tests as the example above: $o->tests( func => &func, tests => " # Test 1 1 2 => a # Test 2 3 4 => b 5 6 => c ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => [ [a], [b], [c] ] ); $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => " a b c ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => " a b c ", ); The expected results may also consist of only a single set of results (in this case, it must be passed in as a listref). In this case, all of the tests are expected to have the same results. 
So, the following are equivalent: $o->tests( func => &func, tests => " 1 2 => a b 3 4 => a b 5 6 => a b ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => [ [a, b] ], ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => "a b", ); The number of expected values must either be 1 (i.e. all of the tests are expected to produce the same value) or exactly the same number as the number of tests. The parser is actually quite powerful, and can handle multi-line tests, quoted strings, and nested data structures. The test may be split across any number of lines, provided there is not a completely blank line (which signals the end of the test), so the following are equivalent: tests => "a b c", tests => "a b c", Arguments (or expected results) may include data structures. For example, the following are equivalent: tests => "[ a b ] { a 1 b 2 }" tests => [ [ [a,b], { a=>1, b=>2 } ] ] Whitespace is mostly optional, but there is one exception. An item must end with some kind of delimiter, so the following will fail: tests => "[a b][c d]" The first element (the list ref [a b]) must be separated from the second element by the delimiter (which is whitespace in this case), so it must be written as: tests => "[a b] [c d]" As already demonstrated, hashrefs and listrefs may be included and nested. Elements may also be included inside parens, but this is optional since all arguments and expected results are already treated as lists, so the following are equivalent: tests => "a b c" tests => "(a b) c" Although parens are optional, they may make things more readable, and allow you to use something other than whitespace as the delimiter. Since parens are actually ignored, a string '()' is also ignored, so do not use empty parentheses. If the character immediately following the opening paren, brace, or bracket is a punctuation mark, then it is used as the delimiter instead of whitespace. 
For example, the following are all equivalent: [ a b c ] [a b c] [, a,b,c ] [, a, b, c ] A delimiter is a single character, and the following may not be used as a delimiter: any opening/closing characters () [] {} single or double quotes alphanumeric characters underscore Whitespace (including newlines) around the delimiter is ignored, so the following is valid: [, a, b, c ] Two delimiters next to each other or a trailing delimiter produce an empty string. "(,a,b,)" => (a, b, '') "(,a,,b)" => (a, '', b) Hashrefs may be specified by braces and the following are equivalent: { a 1 b 2 } {, a,1,b,2 } {, a,1,b,2, } Note that a trailing delimiter is ignored if there are already an even number of elements, or an empty string otherwise. Nested structures are allowed: "[ [1 2] [3 4] ]" For example, $o->tests( func => &func, tests => "a [ b c ] { d 1 e 2 } => x y" ); is equivalent to: $o->tests( func => &func, tests => [ [a, [b,c], {d=>1,e=>2}] ], results => [ [x,y] ], ); Any single value can be surrounded by single or double quotes in order to include the delimiter. So: "(, a,'b,c',e )" is equivalent to: "( a b,c e )" Any single value can be the string '__undef__' which will be turned into an actual undef. If the value is '__blank__' it is turned into an empty string (''), though it can also be specified as '' directly. Any value can have an embedded newline by including a __nl__ in the value, but the value must be written on a single line. Expected results are separated from arguments by ' => '. TEST::INTER VARIABLES To summarize the information above, the following variables are used by Test::Inter. Each variable can be set in two different ways: as an environment variable and as a perl variable in the main namespace. For example, the TI_END variable can be set as: $::TI_END $ENV{TI_END} The following variables can be used to define which tests are run: TI_START Set this to define the test you want to start with. 
Example: If you have a perl test script and you want to start running it at test 12, run the following shell commands: TI_START=12 ./my_test_script.t TI_END Set this to define the test you want to end with. TI_TESTNUM Set this to run only a single test There is also a variable TI_NUM (available only as $::TI_NUM) which is set automatically by Test::Inter to be the test currently being run. The following variables control what is output from the tests, and how it is formatted: TI_QUIET How verbose the test script is. Values are 0 (most verbose) to 2 (least verbose). TI_MODE How the output is formatted. Values are 'inter' (interactive mode) or 'test' (test suite mode). Interactive mode is easier to read. Test mode is for running as part of a test suite. TI_WIDTH The width of the terminal. The following variables control how some tests are run: TI_NOCLEAN When running a file test, the temporary output file will not be removed if this is set. HISTORY The history of this module dates back to 1996 when I needed to write a test suite for my Date::Manip module. At that time, none of the Test::* modules currently available in CPAN existed (the earliest ones didn't come along until 1998), so I was left completely on my own writing my test scripts. I wrote a very basic version of my test framework which allowed me to write all of the tests as a string, it would parse the string, count the tests, and then run them. Over the years, the functionality I wanted grew, and periodically, I'd go back and reexamine other Test frameworks (primarily Test::More) to see if I could replace my framework with an existing module... and I've always found them wanting, and chosen to extend my existing framework instead. As I've written other modules, I've wanted to use the framework in them too, so I've always just copied it in, but this is obviously tedious and error prone. I'm not sure why it took me so long... 
but in 2010, I finally decided it was time to rework the framework in a module form. I loosely based my module on Test::More. I like the functionality of that module, and wanted most of it (and I plan on adding more in future versions). So this module uses some similar syntax to Test::More (though it allows a great deal more flexibility in how the tests are specified). One thing to note is that I may have been able to write this module as an extension to Test::More, but after looking into that possibility, I decided that it would be faster to not do that. I did "borrow" a couple of routines from it (though they've been modified quite heavily) as a starting point for a few of the functions in this module, and I thank the authors of Test::More for their work. KNOWN BUGS AND LIMITATIONS None known. SEE ALSO Test::More - the 'industry standard' of perl test frameworks BUGS AND QUESTIONS If you find a bug in Test::Inter, there are three ways to send it to me. Any of them are fine, so use the method that is easiest for you. Direct email You are welcome to send it directly to me by email. The email address to use is: sbeck@cpan.org. CPAN Bug Tracking You can submit it using the CPAN tracking too. This can be done at the following URL: GitHub You can submit it as an issue on GitHub. This can be done at the following URL: Please do not use other means to report bugs (such as forums for a specific OS or Linux distribution) as it is impossible for me to keep up with all of them. When filing a bug report, please include the following information: Test::Inter version Please include the version of Test::Inter you are using. You can get this by using the script: use Test::Inter; print $Test::Inter::VERSION,"\n"; If you want to report missing or incorrect codes, you must be running the most recent version of Test::Inter. If you find any problems with the documentation (errors, typos, or items that are not clear), please send them to me. 
I welcome any suggestions that will allow me to improve the documentation. LICENSE This script is free software; you can redistribute it and/or modify it under the same terms as Perl itself. AUTHOR Sullivan Beck (sbeck@cpan.org) Test-Inter-1.09/INSTALL0000644000175000001440000000142513442510557014051 0ustar sulbeckusersFor instructions on installing this, or any other perl module in a UNIX environment, please refer to: http://www.cpan.org/modules/INSTALL.html http://learn.perl.org/faq/perlfaq8.html#How-do-I-install-a-module-from-CPAN- For instructions in a Windows environment running ActivePerl, refer to one of the following (depending on your version of perl): http://docs.activestate.com/activeperl/5.6/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.8/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.10/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.12/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.14/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.16/faq/ActivePerl-faq2.html Test-Inter-1.09/META.json0000644000175000001440000000305513442510560014434 0ustar sulbeckusers{ "abstract" : "framework for more readable interactive test scripts", "author" : [ "Sullivan Beck (sbeck@cpan.org)" ], "dynamic_config" : 1, "generated_by" : "ExtUtils::MakeMaker version 7.34, CPAN::Meta::Converter version 2.150010", "license" : [ "perl_5" ], "meta-spec" : { "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : 2 }, "name" : "Test-Inter", "no_index" : { "directory" : [ "t", "inc" ] }, "prereqs" : { "build" : { "requires" : { "ExtUtils::MakeMaker" : "0" } }, "configure" : { "requires" : { "ExtUtils::MakeMaker" : "6.30" } }, "runtime" : { "requires" : { "Cwd" : "0", "File::Basename" : "0", "IO::File" : "0", "perl" : "5.006" } }, "test" : { "requires" : { "File::Find::Rule" : "0", "Test::More" : "0", "Test::Pod" : "1.00", "Test::Pod::Coverage" : "1.00" } } }, "provides" : { 
"Test::Inter" : { "file" : "lib/Test/Inter.pm", "version" : "1.09" } }, "release_status" : "stable", "resources" : { "repository" : { "type" : "git", "url" : "git://github.com/SBECK-github/Test-Inter.git", "web" : "https://github.com/SBECK-github/Test-Inter" } }, "version" : "1.09", "x_serialization_backend" : "JSON::PP version 4.01" } Test-Inter-1.09/t/0000755000175000001440000000000013442510560013253 5ustar sulbeckusersTest-Inter-1.09/t/runtests0000755000175000001440000000075613442510557015106 0ustar sulbeckusers#!/bin/sh # Usage: # runtests [-T] [prefix] # # Runs all tests (or those starting with prefix),optionally in taint mode if [ -z "$TI_MODE" ]; then TI_MODE='inter' export TI_MODE fi if [ -z "$TI_QUIET" ]; then TI_QUIET=2 export TI_QUIET fi if [ "$RELEASE_TESTING" != "0" ]; then RELEASE_TESTING=1 export RELEASE_TESTING fi if [ "$1" = "-T" ]; then taint="-T -I../lib -I." shift else taint= fi subset=$1 for test in ${subset}*.t ;do perl $taint ./$test done Test-Inter-1.09/t/_pod_coverage.t0000755000175000001440000000271313442510557016250 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; use Test::More; use File::Basename; use Cwd 'abs_path'; use Test::Pod::Coverage 1.00; # Don't run tests for installs unless ($ENV{RELEASE_TESTING}) { plan skip_all => 'Author tests not required for installation (set RELEASE_TESTING to test)'; } # Figure out the directories. This comes from Test::Inter. my($moddir,$testdir,$libdir); BEGIN { if (-f "$0") { $moddir = dirname(dirname(abs_path($0))); } elsif (-d "./t") { $moddir = dirname(abs_path('.')); } elsif (-d "../t") { $moddir = dirname(abs_path('..')); } if (-d "$moddir/t") { $testdir = "$moddir/t"; } if (-d "$moddir/lib") { $libdir = "$moddir/lib"; } } use lib $libdir; # If there is a file _pod_coverage.ign, it should be a list of module # name substrings to ignore (any module with any of these substrings # will be ignored). 
my @ign = (); if (-f "$testdir/_pod_coverage.ign") { open(IN,"$testdir/_pod_coverage.ign"); @ign = ; close(IN); chomp(@ign); } # # Test that the POD documentation is complete. # chdir($moddir); if (@ign) { my @mod = all_modules('lib'); my @test = (); MOD: foreach my $mod (@mod) { foreach my $ign (@ign) { next MOD if ($mod =~ /\Q$ign\E/); } push(@test,$mod); } chdir($libdir); plan tests => scalar(@test); foreach my $mod (@test) { pod_coverage_ok($mod); } } else { all_pod_coverage_ok(); } Test-Inter-1.09/t/file.2.in0000644000175000001440000000001712404053065014657 0ustar sulbeckusersThis is a test Test-Inter-1.09/t/file.2.exp0000644000175000001440000000001712404053065015045 0ustar sulbeckusersThis is a test Test-Inter-1.09/t/require_ok.t0000755000175000001440000000065613442203644015621 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } } use Test::Inter; my $ti = new Test::Inter $0; $ti->require_ok('5.001'); $ti->require_ok('7.001','forbid'); $ti->require_ok('Config'); $ti->require_ok('Xxx::Yyy','forbid'); $ti->require_ok('Symbol','feature'); $ti->require_ok('Xxx::Zzz','feature'); $ti->done_testing(); Test-Inter-1.09/t/use_ok.3.t0000755000175000001440000000056013442203722015071 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; my $ti; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } use Test::Inter; $ti = new Test::Inter $0; $ti->use_ok('7.001','forbid'); $ti->use_ok('Config','xxxx','forbid'); $ti->use_ok('Storable',7.01,'dclone','forbid'); } $ti->done_testing(); Test-Inter-1.09/t/skip_all.t0000755000175000001440000000054213442203702015237 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } } use Test::Inter; my $ti = new Test::Inter $0; $ti->skip_all("testing skip_all"); $ti->plan(3); $ti->_ok("Test 1"); 
$ti->diag("Test 1 diagnostic message"); $ti->_ok("Test 2"); $ti->_ok("Test 3"); Test-Inter-1.09/t/use_ok.2.t0000755000175000001440000000054313442203716015074 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; my $ti; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } use Test::Inter; $ti = new Test::Inter $0; $ti->use_ok('7.001','forbid'); $ti->use_ok('Config','myconfig'); $ti->use_ok('Storable',1.01,'dclone'); } $ti->done_testing(); Test-Inter-1.09/t/file.1.exp0000644000175000001440000000004212404053065015042 0ustar sulbeckusersFirst line Second line Third line Test-Inter-1.09/t/_version.t0000755000175000001440000000370113442510557015276 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; use Test::Inter; use IO::File; use File::Find::Rule; my $ti = new Test::Inter $0; unless ($ENV{RELEASE_TESTING}) { $ti->skip_all('Author tests not required for installation (set RELEASE_TESTING to test)'); } # Figure out what module we are in. A module is in a directory: # My-Mod-Name-1.00 # It includes any number of .pm files, each of which contain a single # package. Every package is named: # My::Pack::Name # and includes a variable: # My::Pack::Name::VERSION my $testdir = $ti->testdir(); my $moddir = $ti->testdir('mod'); my $libdir = $ti->testdir('lib'); my @dir = split(/\//,$moddir); my $dir = pop(@dir); my($mod,$vers,$valid); if ($dir =~ /^(.*)\-(\d+\.\d+)$/) { $mod = $1; $vers = $2; $valid = 1; } else { $valid = 0; } # If there is a file _version.ign, it should be a list of filename # substrings to ignore (any .pm file with any of these substrings # will be ignored). my @ign = (); if (-f "$testdir/_version.ign") { open(IN,"$testdir/_version.ign"); @ign = ; close(IN); chomp(@ign); } $ti->ok($valid,"Valid directory"); $ti->skip_all('Remaining tests require a valid directory') if (! 
defined $vers); my $in = new IO::File; my @files = File::Find::Rule->file()->name('*.pm')->in($libdir); FILE: foreach my $file (@files) { foreach my $ign (@ign) { next FILE if ($file =~ /\Q$ign\E/); } $in->open($file); my @tmp = <$in>; chomp(@tmp); my @v = grep /^\$VERSION\s*=\s*['"]\d+\.\d+['"];$/, @tmp; if (! @v) { $ti->ok(0,$file); $ti->diag('File contains no valid version line'); } elsif (@v > 1) { $ti->ok(0,$file); $ti->diag('File contains multiple version lines'); } else { $v[0] =~ /['"](\d+\.\d+)['"]/; my $v = $1; $ti->is($v,$vers,$file); $ti->diag('File contains incorrect version number') if ($v ne $vers); } } $ti->done_testing(); Test-Inter-1.09/t/runtests.bat0000644000175000001440000000037513442510560015637 0ustar sulbeckusersperl -I../lib _version.t 1 perl -I../lib file.t 1 perl -I../lib is.t 1 perl -I../lib ok.t 1 perl -I../lib require_ok.t 1 perl -I../lib skip_all.t 1 perl -I../lib tests.t 1 perl -I../lib use_ok.1.t 1 perl -I../lib use_ok.2.t 1 perl -I../lib use_ok.3.t 1 Test-Inter-1.09/t/_pod.t0000755000175000001440000000323513442510557014375 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; use Test::More; use File::Basename; use Cwd 'abs_path'; use Test::Pod 1.00; # Don't run tests for installs unless ($ENV{RELEASE_TESTING}) { plan skip_all => 'Author tests not required for installation (set RELEASE_TESTING to test)'; } # Figure out the directories. This comes from Test::Inter. my($moddir,$testdir,$libdir); BEGIN { if (-f "$0") { $moddir = dirname(dirname(abs_path($0))); } elsif (-d "./t") { $moddir = dirname(abs_path('.')); } elsif (-d "../t") { $moddir = dirname(abs_path('..')); } if (-d "$moddir/t") { $testdir = "$moddir/t"; } if (-d "$moddir/lib") { $libdir = "$moddir/lib"; } } # If there is a file _pod.ign, it should be a list of filename # substrings to ignore (any file with any of these substrings # will be ignored). 
# # If there is a file named _pod.dirs, then pod files will be looked # at in those directories (instead of the default of all directories). my @ign = (); if (-f "$testdir/_pod.ign") { open(IN,"$testdir/_pod.ign"); @ign = ; close(IN); chomp(@ign); } my @dirs = (); if (-f "$testdir/_pod.dirs") { open(IN,"$testdir/_pod.dirs"); @dirs = ; close(IN); chomp(@dirs); } # # Test that the syntax of our POD documentation is valid. # chdir($moddir); if (@ign) { my @file = all_pod_files(@dirs); my @test; FILE: foreach my $file (@file) { foreach my $ign (@ign) { next FILE if ($file =~ /\Q$ign\E/); } push(@test,$file); } plan tests => scalar(@test); foreach my $file (@test) { pod_file_ok($file); } } else { all_pod_files_ok(@dirs); } Test-Inter-1.09/t/use_ok.1.t0000755000175000001440000000066513442203712015074 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; my $ti; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } use Test::Inter; $ti = new Test::Inter $0; $ti->use_ok('5.004'); $ti->use_ok('Config'); $ti->use_ok('Xxx::Yyy','forbid'); $ti->use_ok('Symbol','feature'); $ti->use_ok('Xxx::Zzz','feature'); $ti->use_ok('Storable',1.01); } $ti->done_testing(); Test-Inter-1.09/t/file.t0000755000175000001440000000132713442203572014367 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } } use Test::Inter; my $ti = new Test::Inter $0; sub func1 { my($tiutput) = @_; my @lines = ("First line", "Second line", "Third line"); open(OUT,">$tiutput"); foreach my $line (@lines) { print OUT "$line\n"; } close(OUT); } sub func2 { my($input,$tiutput) = @_; open(IN,$input); open(OUT,">$tiutput"); my @lines = ; print OUT @lines; close(IN); close(OUT); } $ti->file(\&func1,'', '','file.1.exp','No input'); $ti->file(\&func2,'file.2.in','','file.2.exp','File copy'); $ti->done_testing(); Test-Inter-1.09/t/is.t0000755000175000001440000000074113442203576014066 
0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } } use Test::Inter; my $ti = new Test::Inter $0; $ti->is ( [ 'a','b' ], [ 'a','b' ], "List test" ); $ti->isnt( [ 'a','b' ], [ 'a','c' ], "List test" ); $ti->is ( { 'a' => 1, 'b' => 2 }, { 'a' => 1, 'b' => 2 }, "Hash test" ); $ti->isnt( { 'a' => 1, 'b' => 2 }, { 'a' => 1, 'b' => 3 }, "Hash test" ); $ti->done_testing(); Test-Inter-1.09/t/tests.t0000755000175000001440000000216613442203706014613 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } } use Test::Inter; my $ti = new Test::Inter $0; sub func { my(@args) = @_; my @ret; foreach my $arg (@args) { push(@ret,length($arg)); } return @ret; } $ti->tests(func => \&func, tests => "foo => 3 a ab => 1 2 (x xy xyz) => 1 2 3 (a) (bc) => 1 2 (a (b cd)) => 1 1 2 (,a,bc) => 1 2 (,a,b c) => 1 3 "); $ti->tests(func => \&func, expected => [ [1,2] ], tests => "a ab c cd e ef "); $ti->tests(func => \&func, expected => "1 2", tests => "a ab c cd e ef "); $ti->tests(tests => " '' '' [] [] {} {} "); $ti->done_testing(); Test-Inter-1.09/t/ok.t0000755000175000001440000000203613442203616014056 0ustar sulbeckusers#!/usr/bin/perl use warnings 'all'; use strict; BEGIN { if (-d "lib") { use lib "./lib"; } elsif (-d "../lib") { use lib "../lib"; } } use Test::Inter; my $ti = new Test::Inter $0; $ti->ok(); $ti->ok( 1 == 1 ); $ti->ok( 1 == 1, "Basic test" ); $ti->ok( 1 == 1, 1, "Basic test" ); $ti->ok( 1 == 1, 2, "Basic test" ); sub func_false { return 0; } sub func_true { return 1; } sub func { my($a,$b) = @_; return $a == $b; } $ti->ok( \&func_true ); $ti->ok( \&func_true, "True test" ); $ti->ok( \&func_true, 1, "True test" ); $ti->ok( \&func_true, 2, "True test" ); $ti->ok( \&func, [1,1]); $ti->ok( \&func, [1,1], "Func test" ); $ti->ok( \&func, [1,1], 1, "Func test" ); $ti->ok( \&func, [1,1], 
2, "Func test" ); $ti->ok( [ 'a','b' ], [ 'a','b' ], "List test" ); $ti->ok( [ 'a','b' ], [ 'a','c' ], "List test (non-identical)" ); $ti->ok( { 'a' => 1, 'b' => 2 }, { 'a' => 1, 'b' => 2 }, "Hash test" ); $ti->ok( { 'a' => 1, 'b' => 2 }, { 'a' => 1, 'b' => 3 }, "Hash test (non-identical)" ); $ti->done_testing(); Test-Inter-1.09/examples/0000755000175000001440000000000012404053065014625 5ustar sulbeckusersTest-Inter-1.09/examples/ok0000755000175000001440000000254212404053065015167 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; print "The following tests should all succeed\n\n"; $o->ok(); $o->ok( 1 == 1 ); $o->ok( 1 == 1, "Basic test" ); $o->ok( 1 == 1, 1, "Basic test" ); $o->ok( 1 == 1, 2, "Basic test" ); sub func_false { return 0; } sub func_true { return 1; } sub func { my($a,$b) = @_; return $a == $b; } $o->ok( \&func_true ); $o->ok( \&func_true, "True test" ); $o->ok( \&func_true, 1, "True test" ); $o->ok( \&func_true, 2, "True test" ); $o->ok( \&func, [1,1]); $o->ok( \&func, [1,1], "Func test" ); $o->ok( \&func, [1,1], 1, "Func test" ); $o->ok( \&func, [1,1], 2, "Func test" ); $o->ok( [ a,b ], [ a,b ], "List test" ); $o->ok( [ a,b ], [ a,c ], "List test (non-identical)" ); $o->ok( { a => 1, b => 2 }, { a => 1, b => 2 }, "Hash test" ); $o->ok( { a => 1, b => 2 }, { a => 1, b => 3 }, "Hash test (non-identical)" ); print "\nThe following tests should all fail\n\n"; $o->ok( 1 == 2 ); $o->ok( 1 == 2, "Basic test" ); $o->ok( 1 == 2, 1, "Basic test" ); $o->ok( \&func_false ); $o->ok( \&func_false, "False test" ); $o->ok( \&func_false, 1, "False test" ); $o->ok( \&func, [1,2]); $o->ok( \&func, [1,2], "Func test" ); $o->ok( \&func, [1,2], 1, "Func test" ); $o->ok( [], [ a,b ], "List test" ); $o->ok( [undef], [ a,b ], "List test" ); $o->ok( { a => undef }, { a => 1 }, "Hash test" ); Test-Inter-1.09/examples/is0000755000175000001440000000112512404053065015165 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; print "The 
following tests should all succeed\n\n"; $o->is ( [ a,b ], [ a,b ], "List test" ); $o->isnt( [ a,b ], [ a,c ], "List test" ); $o->is ( { a => 1, b => 2 }, { a => 1, b => 2 }, "Hash test" ); $o->isnt( { a => 1, b => 2 }, { a => 1, b => 3 }, "Hash test" ); print "\nThe following tests should all fail\n\n"; $o->isnt( [ a,b ], [ a,b ], "List test" ); $o->is ( [ a,b ], [ a,c ], "List test" ); $o->isnt( { a => 1, b => 2 }, { a => 1, b => 2 }, "Hash test" ); $o->is ( { a => 1, b => 2 }, { a => 1, b => 3 }, "Hash test" ); Test-Inter-1.09/examples/tests0000755000175000001440000000204212404053065015713 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; print "The following tests test some improperly formed tests\n\n"; sub func1 { my($a,$b) = @_; if ($a eq 'a' && $b eq 'b') { return 1; } elsif ($a eq 'c' && $b eq 'd') { return 2; } elsif ($a eq 'e' && $b eq 'f') { return 3; } } print "The 2nd one fails with 'expected results for some, not others\n\n"; $o->tests(func => \&func1, tests => "a b => 1 c d e f => 3"); print "\n\nFails with '=>' found twice\n\n"; $o->tests(func => \&func1, tests => "a b => 1 => 1"); print "\n\nFails with odd number of elements in hash\n\n"; $o->tests(func => \&func1, tests => "{ a b c } => 1"); print "\n\nFails with improper quoting\n\n"; $o->tests(func => \&func1, tests => "a 'b => 1"); print "\n\nFails with unable to parse\n\n"; $o->tests(func => \&func1, tests => "(a b c"); print "\n\nFails with unexpected token\n\n"; $o->tests(func => \&func1, tests => "(a b c)(d e) => 1"); Test-Inter-1.09/examples/plan0000755000175000001440000000031112404053065015500 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; use Test::Inter; my $o = new Test::Inter; print "The following script will fail due to multiple plans\n\n"; $o->plan(2); $o->plan(5); $o->done_testing(); Test-Inter-1.09/examples/use_ok0000755000175000001440000000043012404053065016035 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; our($o); BEGIN { 
print "The first test will fail, all others will be skipped.\n\n"; use Test::Inter; $o = new Test::Inter; } BEGIN { $o->use_ok('Xxx::Yyy'); } $o->ok(); $o->ok( 1 == 1 ); $o->ok( 1 == 2 ); $o->done_testing(); Test-Inter-1.09/MANIFEST0000644000175000001440000000115413442510560014142 0ustar sulbeckusersChanges examples/is examples/ok examples/plan examples/tests examples/use_ok INSTALL internal/build.yaml internal/ispell.isp internal/NOTES internal/TODO lib/Test/Inter.pm lib/Test/Inter.pod LICENSE Makefile.PL MANIFEST This list of files README t/_pod.t t/_pod_coverage.t t/_version.t t/file.1.exp t/file.2.exp t/file.2.in t/file.t t/is.t t/ok.t t/require_ok.t t/runtests t/runtests.bat t/skip_all.t t/tests.t t/use_ok.1.t t/use_ok.2.t t/use_ok.3.t META.yml Module YAML meta-data (added by MakeMaker) META.json Module JSON meta-data (added by MakeMaker) Test-Inter-1.09/internal/0000755000175000001440000000000013442203200014612 5ustar sulbeckusersTest-Inter-1.09/internal/NOTES0000644000175000001440000000043013246005220015426 0ustar sulbeckusers######################################################################### Before every release, spellcheck the documents: for i in `find lib -type f -name \*.pod` ;do pod2text $i > $i.txt ispell -p internal/ispell.isp $i.txt rm -f $i.txt $i.txt.bak done Test-Inter-1.09/internal/TODO0000644000175000001440000000257313442203200015311 0ustar sulbeckusersConsider adding the following from Test::More note explain can_ok isa_ok subtest cmp_ok like unlike handles strings match regexps lists contain certain element hashes contain certain keys Add support for input files Look at: Test::Group Test::Slow to see if there's anything I want to add. Look at using Data::PrettyPrintObjects to print out the results. Add support for timing tests: ok 1 (12 ms) ok 2 (23 ms) Name each test (and break on test name) even if tests defined in a string. 
========================================================================= OLD STUFF # Add a variable TI_TODO # if '' (default) pass even if TODO tests fail # if 'warn', pass but warn about TODO tests # if 'fail', fail if TODO tests fail # Add functions: # like, unlike # cmp_ok # can_ok # isa_ok # new_ok # subtest # pass, fail # is_deeply # explain # Conditional tests # skip(N) skip next N (defaults to 1) tests # todo_skip # ARGS => VALS # true if returned values are itentical to VALS # ARGS =>OP VALS # =>is # =>isnt # =>like # =>unlike # =>cmp(NOP) # NOP : < > <= >= != # =>cmp(N,M) # true if VAL > N and VAL < M # =>cmp[N,M] # true if VAL >= N and VAL <= M # =>cmp(N,M] # =>cmp[N,M) # =>member # true if the return value is any of the VALS Test-Inter-1.09/internal/ispell.isp0000644000175000001440000000035213442002225016623 0ustar sulbeckusersAPI arg args config cpan diag Dist ENV eq foo foreach func GitHub github hashref hashrefs html http https isnt listref listrefs Manip nl NOCLEAN NUM ok org outputdir paren parens perl perl's qw rt sbeck testdir testnum todo undef val Test-Inter-1.09/internal/build.yaml0000644000175000001440000000004212404053065016602 0ustar sulbeckusers--- script: examples: 0 git: 1