Test-Inter-1.06/0000755000175000001440000000000012466660014013012 5ustar sulbeckusersTest-Inter-1.06/t/0000755000175000001440000000000012466660014013255 5ustar sulbeckusersTest-Inter-1.06/t/use_ok.2.t0000755000175000001440000000040512404053065015063 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; our($o); BEGIN { use Test::Inter; $o = new Test::Inter; } BEGIN { $o->use_ok('7.001','forbid'); } BEGIN { $o->use_ok('Config','myconfig'); } BEGIN { $o->use_ok('Storable',1.01,'dclone'); } $o->done_testing(); Test-Inter-1.06/t/tests.t0000755000175000001440000000167012404053065014605 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; sub func { my(@args) = @_; my @ret; foreach my $arg (@args) { push(@ret,length($arg)); } return @ret; } $o->tests(func => \&func, tests => "foo => 3 a ab => 1 2 (x xy xyz) => 1 2 3 (a) (bc) => 1 2 (a (b cd)) => 1 1 2 (,a,bc) => 1 2 (,a,b c) => 1 3 "); $o->tests(func => \&func, expected => [ [1,2] ], tests => "a ab c cd e ef "); $o->tests(func => \&func, expected => "1 2", tests => "a ab c cd e ef "); $o->tests(tests => "1 '' '' "); $o->done_testing(); Test-Inter-1.06/t/file.2.exp0000644000175000001440000000001712404053065015042 0ustar sulbeckusersThis is a test Test-Inter-1.06/t/skip_all.t0000755000175000001440000000034612404053065015240 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; use Test::Inter; my $o = new Test::Inter; $o->skip_all("testing skip_all"); $o->plan(3); $o->_ok("Test 1"); $o->diag("Test 1 diagnostic message"); $o->_ok("Test 2"); $o->_ok("Test 3"); Test-Inter-1.06/t/runtests.bat0000644000175000001440000000034212466660012015631 0ustar sulbeckusersperl -I../lib file.t 1 perl -I../lib is.t 1 perl -I../lib ok.t 1 perl -I../lib require_ok.t 1 perl -I../lib skip_all.t 1 perl -I../lib tests.t 1 perl -I../lib use_ok.1.t 1 perl -I../lib use_ok.2.t 1 perl -I../lib use_ok.3.t 1 Test-Inter-1.06/t/file.2.in0000644000175000001440000000001712404053065014654 0ustar sulbeckusersThis is a test Test-Inter-1.06/t/is.t0000755000175000001440000000051012404053065014046 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; $o->is ( [ 'a','b' ], [ 'a','b' ], "List test" ); $o->isnt( [ 'a','b' ], [ 'a','c' ], "List test" ); $o->is ( { 'a' => 1, 'b' => 2 }, { 'a' => 1, 'b' => 2 }, "Hash test" ); $o->isnt( { 'a' => 1, 'b' => 2 }, { 'a' => 1, 'b' => 3 }, "Hash test" ); $o->done_testing(); Test-Inter-1.06/t/use_ok.1.t0000755000175000001440000000055112404053065015064 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; our($o); BEGIN { use Test::Inter; $o = new Test::Inter; } BEGIN { $o->use_ok('5.004'); } BEGIN { $o->use_ok('Config'); } BEGIN { $o->use_ok('Xxx::Yyy','forbid'); } BEGIN { $o->use_ok('Symbol','feature'); } BEGIN { $o->use_ok('Xxx::Zzz','feature'); } BEGIN { $o->use_ok('Storable',1.01); } $o->done_testing(); Test-Inter-1.06/t/runtests0000755000175000001440000000064512466660012015075 0ustar sulbeckusers#!/bin/sh # Usage: # runtests [-T] [prefix] # # Runs all tests (or those starting with prefix),optionally in taint mode TI_MODE='inter' export TI_MODE TI_QUIET=2 export TI_QUIET if [ "$RELEASE_TESTING" != "0" ]; then RELEASE_TESTING=1 export RELEASE_TESTING fi if [ "$1" = "-T" ]; then taint="-T -I../lib -I." 
shift else taint= fi subset=$1 for test in ${subset}*.t ;do perl $taint ./$test done Test-Inter-1.06/t/file.1.exp0000644000175000001440000000004212404053065015037 0ustar sulbeckusersFirst line Second line Third line Test-Inter-1.06/t/pod.t0000755000175000001440000000227212466660012014230 0ustar sulbeckusers#!/usr/bin/perl # # Test that the syntax of our POD documentation is valid. # use strict; use Test::More; # Don't run tests for installs unless ( $ENV{RELEASE_TESTING} ) { plan( skip_all => "Author tests not required for installation" ); } eval "use Test::Pod 1.00"; plan skip_all => "Test::Pod 1.00 required for testing POD" if $@; eval "use File::Basename"; plan skip_all => "File::Basename required for testing POD" if $@; # If there is a file pod.ign, it should be a list of filename # substrings to ignore (any file with any of these substrings # will be ignored). # Find the pod.ign file my($testdir); if (-f "$0") { my $COM = $0; $testdir = dirname($COM); $testdir = '.' if (! $testdir); } elsif (-d 't') { $testdir = 't'; } else { $testdir = '.'; } my @ign = (); if (-f "$testdir/pod.ign") { open(IN,"$testdir/pod.ign"); @ign = ; close(IN); chomp(@ign); } chdir("..") if ($testdir eq '.'); if (@ign) { my @file = all_pod_files(); FILE: foreach my $file (@file) { foreach my $ign (@ign) { next FILE if ($file =~ /\Q$ign\E/); } pod_file_ok($file); } done_testing(); } else { all_pod_files_ok(); } Test-Inter-1.06/t/require_ok.t0000755000175000001440000000046112404053065015605 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; use Test::Inter; my $o = new Test::Inter; $o->require_ok('5.001'); $o->require_ok('7.001','forbid'); $o->require_ok('Config'); $o->require_ok('Xxx::Yyy','forbid'); $o->require_ok('Symbol','feature'); $o->require_ok('Xxx::Zzz','feature'); $o->done_testing(); Test-Inter-1.06/t/use_ok.3.t0000755000175000001440000000042312404053065015064 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; our($o); BEGIN { use Test::Inter; $o = new Test::Inter; } BEGIN { $o->use_ok('7.001','forbid'); } BEGIN { $o->use_ok('Config','xxxx','forbid'); } BEGIN { $o->use_ok('Storable',7.01,'dclone','forbid'); } $o->done_testing(); Test-Inter-1.06/t/ok.t0000755000175000001440000000157012404053065014053 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; $o->ok(); $o->ok( 1 == 1 ); $o->ok( 1 == 1, "Basic test" ); $o->ok( 1 == 1, 1, "Basic test" ); $o->ok( 1 == 1, 2, "Basic test" ); sub func_false { return 0; } sub func_true { return 1; } sub func { my($a,$b) = @_; return $a == $b; } $o->ok( \&func_true ); $o->ok( \&func_true, "True test" ); $o->ok( \&func_true, 1, "True test" ); $o->ok( \&func_true, 2, "True test" ); $o->ok( \&func, [1,1]); $o->ok( \&func, [1,1], "Func test" ); $o->ok( \&func, [1,1], 1, "Func test" ); $o->ok( \&func, [1,1], 2, "Func test" ); $o->ok( [ 'a','b' ], [ 'a','b' ], "List test" ); $o->ok( [ 'a','b' ], [ 'a','c' ], "List test (non-identical)" ); $o->ok( { 'a' => 1, 'b' => 2 }, { 'a' => 1, 'b' => 2 }, "Hash test" ); $o->ok( { 'a' => 1, 'b' => 2 }, { 'a' => 1, 'b' => 3 }, "Hash test (non-identical)" ); $o->done_testing(); Test-Inter-1.06/t/file.t0000755000175000001440000000107412404053065014360 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; sub func1 { my($output) = @_; my @lines = ("First line", "Second line", "Third line"); open(OUT,">$output"); foreach my $line (@lines) { print OUT "$line\n"; } close(OUT); } sub func2 { my($input,$output) = @_; open(IN,$input); open(OUT,">$output"); my @lines = ; print OUT @lines; 
close(IN); close(OUT); } $o->file(\&func1,'', '','file.1.exp','No input'); $o->file(\&func2,'file.2.in','','file.2.exp','File copy'); $o->done_testing(); Test-Inter-1.06/t/pod_coverage.t0000755000175000001440000000237512466660012016107 0ustar sulbeckusers#!/usr/bin/perl # # Test that the POD documentation is complete. # use strict; use Test::More; # Don't run tests for installs unless ( $ENV{RELEASE_TESTING} ) { plan( skip_all => "Author tests not required for installation" ); } eval "use Test::Pod::Coverage 1.00"; plan skip_all => "Test::Pod::Coverage 1.00 required for testing POD coverage" if $@; eval "use File::Basename"; plan skip_all => "File::Basename required for testing POD coverage" if $@; # If there is a file pod_coverage.ign, it should be a list of module # name substrings to ignore (any module with any of these substrings # will be ignored). # Find the pod_coverage.ign file my($testdir); if (-f "$0") { my $COM = $0; $testdir = dirname($COM); $testdir = '.' if (! $testdir); } elsif (-d 't') { $testdir = 't'; } else { $testdir = '.'; } my @ign = (); if (-f "$testdir/pod_coverage.ign") { open(IN,"$testdir/pod_coverage.ign"); @ign = ; close(IN); chomp(@ign); } chdir("..") if ($testdir eq '.'); if (@ign) { my @mod = all_modules(); MOD: foreach my $mod (@mod) { foreach my $ign (@ign) { next MOD if ($mod =~ /\Q$ign\E/); } pod_coverage_ok($mod); } done_testing(); } else { all_pod_coverage_ok(); } Test-Inter-1.06/META.json0000644000175000001440000000231612466660013014434 0ustar sulbeckusers{ "abstract" : "framework for more readable interactive test scripts", "author" : [ "Sullivan Beck " ], "dynamic_config" : 1, "generated_by" : "Module::Build version 0.4205", "license" : [ "perl_5" ], "meta-spec" : { "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : "2" }, "name" : "Test-Inter", "prereqs" : { "build" : { "requires" : { "Module::Build" : "0.21" } }, "configure" : { "requires" : { "Module::Build" : "0.21" } }, "runtime" : { "requires" : { "File::Basename" : "0", "IO::File" : "0", "perl" : "5.006" } }, "test" : { "requires" : { "Test::More" : "0" } } }, "provides" : { "Test::Inter" : { "file" : "lib/Test/Inter.pm", "version" : "1.06" } }, "release_status" : "stable", "resources" : { "license" : [ "http://dev.perl.org/licenses/" ], "repository" : { "url" : "https://github.com/SBECK-github/Test-Inter" } }, "version" : "1.06" } Test-Inter-1.06/README0000644000175000001440000011570612466660012013702 0ustar sulbeckusersNAME Test::Inter - framework for more readable interactive test scripts DESCRIPTION This is another framework for writing test scripts. Some of the syntax is loosely inspired by Test::More, and has most of it's functionality, but it is not a drop-in replacement. Test::More (and other existing test frameworks) suffer from two weaknesses, both of which have prevented me from ever using them: None offer the ability to access specific tests in a reasonably interactive fashion None offer the ability to write the tests in whatever format would make the tests the most readable The way I write and use test scripts, existing Test::* modules are not nearly as useful as they could be. Test scripts written using Test::More work fine when running as part of the test suite, but debugging an individual test requires extra steps, and the tests themselves are not as readable as they should be. I do most of my debugging using test scripts. 
When I find a bug, I write a test case for it, debug it using the test script, and then leave the test there so the bug won't come back (hopefully).

    Since I use test scripts in two ways (part of a standard test suite and to run the scripts in some interactive way to debug problems), I want to be able to do the following trivially:

    Easy access to a specific test or tests

    If I'm running the test script interactively (perhaps in the debugger), there are several common functions that I want to have available, including:

        Run only a single test, or a subset of tests

        Set a breakpoint in the debugger to run up to the start of the Nth test

    Better diagnostics

    When running a test script as part of a test suite, the pass/fail status is really the only thing of interest. You just want to know if the module passes all the tests. When running interactively, additional information may allow me to quickly track down the problem without even resorting to a debugger. If a test fails, I almost always want to see why it failed if I'm running it interactively. If reasonable, I want to see a list of what was input, what was output, and what was expected.

    The other feature that I wanted in a test suite is the ability to define the tests in a format that is natural and readable FOR THE TESTS.

    In almost every case, it is best to think of a test script as consisting of two separate parts: a script part, and a test part.

    The script part of a test script is the least important part! It's usually fairly trivial, rarely needs to be changed, and is not the focus of the test script.

    The tests part of the script IS the important part, and these should be expressed in a form that is natural to them, easy to maintain, easy to read, and easy to modify, and none of these should involve modifying the script portion of the test script in general.

    Because the content of the tests is the important part of the script, the emphasis should be on making them more readable, even at the expense of the script portion. As a general rule, if the script portion of the test script obscures the tests in any way, it's not written correctly!

    The solution to this is well understood, and is common to many other systems where you are mixing two "languages". The task of correctly specifying both the tests and the test script is virtually identical to the task of creating a PHP script which consists of a mixture of PHP and HTML, or the task of creating a template file using some templating system where the file consists of a mixture of text to be displayed and templating commands. It is well understood in each of these cases that the more the two "languages" are interwoven, the less readable both are, and the harder it is to maintain. The more you are able to separate the two, the easier both are to read and maintain.

    As often as possible, I want the tests to be written in some sort of text format which can be easily read as a table with no perl commands interspersed. I want the freedom to define the tests in one section (a long string, the DATA section, or even in a separate file) which is easily readable. This may introduce the necessity of parsing it, but it makes it significantly easier to maintain the tests. This flexibility makes it much easier to read the tests (as opposed to the script) which is the fundamental content of a test script.
To illustrate some of this, in Test::More, a series of tests might be specified as: # test 1 $result = func("apples","bushels"); is($result, "enough"); # test 2 $result = func("grapefruit","tons"); is($result, "enough"); # test 3 $result = func("oranges","boatloads"); is($result, "insufficient"); Thinking about the features I want that I listed above, there are several difficulties with this. Debugging the script is tedious Say you ran the test suite, and test 3 failed. To debug it using a traditional Test::* module, you have to open up the test script, find the 3rd test (which won't necessarily be trivial, especially if you're talking about the 103rd test), and then run the debugger setting a break point at that line number. This sequence of steps is typically not very hard (especially when the test script is as simple as the example above), but it's still lots of steps that serve only to break your train of thought. How much better to be able to set a break point in the function that actually performs the test for the Nth test. It would also be nice to be able to skip the first two tests... perhaps they take a long time to run, and I want to get right to work on test 3. You can do this easily too by setting the $::TI_START variable. Way too much perl interspersed with the tests It's difficult to read the tests individually in this script because there is too much perl code among them, and virtually impossible to look at them as a whole. It is true that looking at this particular example, it is very simple... but the script ISN'T the content you're interested in (and bear in mind that many test scripts are nowhere near this simple). The REAL content of this script are the tests, which consist of the function arguments and the expected result. Although it's not impossible to see each of these in the script above, it's not in a format that is conducive to studying the tests, and especially not for examining the list of tests as a whole. Now, look at an alternate way of specifying the tests using this module: $tests = " apples bushels => enough grapefruit tons => enough oranges boatloads => insufficient "; $o->tests(tests => $tests, func => \&func); Here, it's easy to see the list of tests, and adding additional tests is a breeze. This module supports a number of methods for defining tests, so you can use whichever one is most convenient (including methods that are identical to Test::More if that really is the best method). In addition, the following debugger command works as desired: b func ($::TI_NUM==3) and you're ready to debug (assuming that the test function is named 'func'). CREATING A TEST Every test may have several pieces of information: A name Every test is automatically assigned a number, but it may be useful to specify a name of a test (which is actually a short description of the test). Whenever a test result is reported, the name will be given (if one was specified). The name may not have a '#' in it. The name is completely optional, but makes the results more readable. An expected result In order to test something, you need to know what result was expected (or in some cases, what result was NOT expected). A function and arguments OR a result You also need to know the results that you're comparing to the expected results. This can be obtained by simply working with a set of results, or a function name and a set of arguments to pass to it. 
Conditions

    It is useful to be able to specify state information at the start of the test suite (for example, to see if certain features are available), and some tests may only run if those conditions are met. If no conditions are set for a test, it will always run.

    Todo tests

    Some tests may be marked as 'todo' tests. These are tests which are allowed to fail (meaning that they have been put in place for an as-yet unimplemented feature). Since it is expected that the test will fail, the test suite will still pass, even if these tests fail. The tests will still run and if they pass, a message is issued saying that the feature is now implemented, and the tests should be graduated to non-todo state.

BASE METHODS

    new

        $o = new Test::Inter [$name] [%options];

    This creates a new test framework. There are several options which may be used to specify which tests are run, how they are run, and what output is given. The entire test script can be named by passing in $name. Options can be passed in as a hash of ($opt,$val) pairs.

    Options can be set in four different ways. First, you can pass in an ($opt,$val) pair in the new method. Second, you can set an environment variable (which overrides any value passed to the new method). Third, you can set a global variable (which overrides both the environment variable and options passed to the new method). Fourth, you can call the appropriate method to set the option. This overrides all other methods.

    Each of the allowed options is described below in the following base methods: start end testnum plan abort quiet mode skip_all width

    version

        $o->version();

    Returns the version of the module.

    start

        $o = new Test::Inter 'start' => $N;
        $o->start($N);

    To define which test you want to start with, pass in an ($opt,$val) pair of ('start',N), set an environment variable TI_START=N, or a global variable $::TI_START=N.

    When the start test is defined, most tests numbered less than N are completely ignored. If the tests are being run quietly (see the quiet method below), nothing is printed out for these tests. Otherwise, a skip message is printed out.

    One class of tests IS still executed. Tests run using the require_ok or use_ok methods (to test the loading of modules) are still run.

    If no value (or a value of 0) is used, it defaults to the first test.

    end

        $o = new Test::Inter 'end' => $M;
        $o->end($M);

    To define which test you want to end with, pass in an ($opt,$val) pair of ('end',M), set an environment variable TI_END=M, or set a global variable $::TI_END=M.

    When the end test is defined, all tests numbered more than M are completely ignored. If the tests are being run quietly (see the quiet method below), nothing is printed out for these tests. Otherwise, a skip message is printed out.

    If no value is given, it defaults to 0 (which means that all remaining tests are run).

    testnum

        $o = new Test::Inter 'testnum' => $N;
        $o->testnum($N);

    This is used to run only a single test. It is equivalent to setting both the start and end tests to $N.

    plan
    done_testing

        $o = new Test::Inter 'plan' => $N;
        $o->plan($n);

        $o->done_testing();
        $o->done_testing($n);

    The TAP API (the 'language' used to run a sequence of tests and see which ones failed and which ones passed) requires a statement of the number of tests that are expected to run. This statement can appear at the start of the test suite, or at the end.
If you know in advance how many tests should run in the test script, you can pass in a non-zero integer in a ('plan',N) pair to the new method, or set the TI_PLAN environment variable or the $::TI_PLAN global variable, or call the plan method. If you know how many tests should run at the end of the test script, you can pass in a non-zero integer to the done_testing method. Frequently, you don't really care how many tests are in the script (especially if new tests are added on a regular basis). In this case, you still need to include a statement that says that the number of tests expected is however many were run. To do this, call the done_testing method with no argument. NOTE: if the plan method is used, it MUST be used before any tests are run (including those that test the loading of modules). If the done_testing method is used, it MUST be called after all tests are run. You must specify a plan or use a done_testing statement, but you cannot do both. It is NOT strictly required to set a plan if the script is only run interactively, so if for some reason this module is used for test scripts which are not part of a standard perl test suite, the plan and done_testing statements are optional. As a matter of fact, the script will run just fine without them... but a perl installer will report a failure in the test suite. abort $o = new Test::Inter 'abort' => 0/1/2; $o->abort(0/1/2); The abort option can be set using an ('abort',0/1/2) option pair, or by setting the TI_ABORT environment variable, or the $::TI_ABORT global variable. If this is set to 1, the test script will run unmodified until a test fails. At that point, all remaining tests will be skipped. If it is set to 2, the test script will run until a test fails at which point it will exit with an error code of 1. In both cases, todo tests will NOT trigger the abort behavior. quiet $o = new Test::Inter 'quiet' => 0/1/2; $o->quiet(0/1/2); The quiet option can be set using an ('quiet',0/1/2) option pair, or by setting the TI_QUIET environment variable, or the $::TI_QUIET global variable. If this is set to 0 (the default), all information will be printed out. If it is set to 1, some optional information will not be printed. If it is set to 2, all optional information will not be printed. mode $o = new Test::Inter 'mode' => MODE; $o->mode(MODE); The mode option can be set using a ('mode',MODE) option pair, or by setting the TI_MODE environment variable, or the $::TI_MODE global variable. Currently, MODE can be 'test' or 'inter' meaning that the script is run as part of a test suite, or interactively. When run in test mode, it prints out the results using the TAP grammar (i.e. 'ok 1', 'not ok 3', etc.). When run in interactive mode, it prints out results in a more human readable format. width $o = new Test::Inter 'width' => WIDTH; $o->width(WIDTH); The width option can be set using a ('width',WIDTH) option pair, or by setting the TI_WIDTH environment variable, or the $::TI_WIDTH global variable. WIDTH is the width of the terminal (for printing out failed test information). It defaults to 80, but it can be set to any width (and lines longer then this are truncated). If WIDTH is set to 0, no truncation is done. skip_all $o = new Test::Inter 'skip_all' => REASON; $o->skip_all(REASON); The skip_all option can be set using an ('skip_all',REASON) option pair, or by setting the TI_SKIP_ALL environment variable, or the $::TI_SKIP_ALL global variable. If this is set, the entire test script will be skipped for the reason given. 
This must be done before any test is run, and before any plan number is set. The skip_all can also be called at any point during the script (i.e. after tests have been run). In this case, all remaining scripts will be skipped. $o->skip_all(REASON,FEATURE,FEATURE,...); $o->skip_all('',FEATURE,FEATURE,...); This will skip all tests (or all remaining tests) unless all features are available. REASON can be entered as an empty string and the reason the tests are skipped will be a message about the missing feature. feature $o->feature($feature,$val); This defines a feature. If $val is non-zero, the feature is available. Otherwise it is not. diag note $o->diag($message); $o->note($message); Both of these print an optional message. Messages printed with the note method are always optional and will be omitted if the quiet option is set to 1 or 2. Messages printed with the diag method are optional and will not be printed if the quiet option is set to 2, but they will be printed if the quiet method is set to 1. testdir Occasionally, it may be necessary to know the directory where the tests live (for example, there may be a config or data file in there). This method will return the directory. METHODS FOR LOADING MODULES Test scripts can load other modules (using either the perl 'use' or 'require' commands). There are three different modes for doing this which determine how this is done. required mode By default, this is used to test for a module that is required for all tests in the test script. Loading the module is treated as an actual test in the test suite. The test is to determine whether the module is available and can be loaded. If it can be loaded, it is, and it is reported as a successful test. If it cannot be loaded, it is reported as a failed test. In the result of a failed test, all remaining tests will be skipped automatically (except for other tests which load modules). feature mode In feature mode, loading the module is not treated as a test (i.e. it will not print out an 'ok' or 'not ok' line. Instead, it will set a feature (named the same as the module) which can be used to determine whether other tests should run or not. forbid mode In a few very rare cases, we may want to test for a module but expect that it not be present. This is the exact opposite of the 'required' mode. Successfully loading the module is treated as a test failure. In the event of a failure, all remaining tests will be skipped. The methods available are: require_ok $o->require_ok($module [,$mode]); This is used to load a module using the perl 'require' function. If $mode is not passed in, the default mode (required) is used to test the existence of the module. If $mode is passed in, it must be either the string 'forbid' or 'feature'. If $mode is 'feature', a feature named $module is set if the module was able to be loaded. use_ok $o->use_ok(@args [,$mode]); This is used to load a module with 'use', or check a perl version. BEGIN { $o->use_ok('5.010'); } BEGIN { $o->use_ok('Some::Module'); } BEGIN { $o->use_ok('Some::Module',2.05); } BEGIN { $o->use_ok('Some::Module','foo','bar'); } BEGIN { $o->use_ok('Some::Module',2.05,'foo','bar'); } are the same as: use 5.010; use Some::Module; use Some::Module 2.05; use Some::Module qw(foo bar); use Some::Module 2.05 qw(foo bar); Putting the use_ok call in a BEGIN block allows the functions to be imported at compile-time and prototypes are properly honored. You'll also need to load the Test::Inter module, and create the object in a BEGIN block. 
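For example, a skeleton script using this pattern (loosely modeled on the t/use_ok.1.t script shipped with this distribution; the modules checked here are only illustrative) might look like:

        #!/usr/bin/perl
        use strict;
        use warnings;

        our($o);

        # Create the Test::Inter object at compile time so that the
        # use_ok calls below can import symbols properly.
        BEGIN { use Test::Inter; $o = new Test::Inter; }

        BEGIN { $o->use_ok('5.004'); }           # perl version check
        BEGIN { $o->use_ok('Config'); }          # use Config;
        BEGIN { $o->use_ok('Storable',1.01); }   # use Storable 1.01;

        $o->done_testing();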
$mode acts the same as in the require_ok method. METHODS FOR RUNNING TEST There are several methods for running tests. The ok, is, and isnt methods are included for those already comfortable with Test::More and wishing to stick with the same format of test script. The tests method is the suggested method though since it makes use of the full power of this module. ok $o->ok(TESTS); A test run with ok looks at a result, and if it evaluates to 0 (or false), it fails. If it evaluates to non-zero (or true), it passes. These tests do not require you to specify the expected results. If expected results are given, they will be compared against the result received, and if they differ, a diagnostic message will be printed, but the test will still succeed or fail based only on the actual result produced. These tests require a single result and either zero or one expected results. To run a single test, use any of the following: $o->ok(); # always succeeds $o->ok($result); $o->ok($result,$name); $o->ok($result,$expected,$name); $o->ok(\&func); $o->ok(\&func,$name); $o->ok(\&func,$expected,$name); $o->ok(\&func,\@args); $o->ok(\&func,\@args,$name); $o->ok(\&func,\@args,$expected,$name); If $result is a scalar, the test passes if $result is true. If $result is a list reference, and the list is either empty, or the first element is a scalar), the test succeeds if the list contains any values (except for undef). If $result is a hash reference, the test succeeds if the hash contains any key with a value that is not undef. If \&func and \@args are passed in, then $result is generated by passing @args to &func and behaves identically to the calls where $result is passed in. If \&func is passed in but no arguments, the function takes no arguments, but still produces a result. $result may be a scalar, list reference, or hash reference. If it is a list reference, the test passes is the list contains any defined values. If it is a hash reference, the test passes if any of the keys contain defined values. If an expected value is passed in and the result does not match it, a diagnostic warning will be printed, even if the test passes. is isnt $o->is(TESTS); $o->isnt(TESTS); A test run with is looks at a result and tests to see if it is identical to an expected result. If it is, the test passes. Otherwise it fails. In the case of a failure, a diagnostic message will show what result was actually obtained and what was expected. A test run with isnt looks at a result and tests to see if the result obtained is different than an expected result. If it is different, the test passes. Otherwise it fails. The is method can be called in any of the following ways: $o->is($result,$expected); $o->is($result,$expected,$name); $o->is(\&func,$expected); $o->is(\&func,$expected,$name); $o->is(\&func,\@args,$expected); $o->is(\&func,\@args,$expected,$name); The isnt method can be called in exactly the same way. As with the ok method, the result can be a scalar, hashref, or listref. If it is a hashref or listref, the entire structure must match the expected value. tests $o->tests($opt=>$val, $opt=>$val, ...) The options available are described in the following section. file $o->file($func,$input,$outputdir,$expected,$name [,@args]); Sometimes it may be easiest to store the input, output, and expected output from a test in a text file. In this case, each line of output will be treated as a single test, so the output and expected output must match up exactly. 
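As a rough sketch (adapted from the t/file.t script in this distribution; the file names are placeholders, and the arguments are described below), a file-based test might look like:

        # func2 copies its input file to a temporary output file; the
        # framework then compares that output, line by line, against
        # the expected file.
        sub func2 {
           my($input,$output) = @_;
           open(my $in,  '<', $input)  or die "Cannot read $input: $!";
           open(my $out, '>', $output) or die "Cannot write $output: $!";
           print {$out} <$in>;
           close($in);
           close($out);
        }

        $o->file(\&func2,'file.2.in','','file.2.exp','File copy');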
$func is a reference to a function which will produce a temporary output file. If $input is specified, it is the name of the input file. If it is empty, no input file will be used. The input file can be fully specified, or it can be relative to the test directory. If $outputdir is passed in, it is the directory where the output file will be written. It can be fully specified, or relative to the test directory. If $outputdir is left blank, the temporary file will be written to the test directory. $expected is the name of a file which contains the expected output. It can be fully specified, or it will be checked for in the test directory. $name is the name of this series of tests. @args are extra arguments to pass to the test function. The function will be called with the arguments: &$func( [$input,] $output,@args); $input is only passed in if it was passed in to this method. If no input file is specified, nothing will be passed to the function. $output is the name of a temporary file where the output will be written to. USING THE TESTS METHOD It is expected that most tests (except for those that load a module) will be run using the tests method called as: $o->tests($opt => $val, $opt => $val, ...); The following options are available: name name => NAME This sets the name of this set of tests. All tests will be given the same name. tests func expected In order to specify a series of tests, you have to specify either a function and a list of arguments, or a list of results. Specifying the function and list of arguments can be done using the pair: func => \&FUNCTION tests => TESTS If the func option is not set, tests contains a list of results. A list of expected results may also be given. They can be included in the tests => TESTS option or included separately as: expected => RESULTS The way to specify these are covered in the next section SPECIFYING THE TESTS. feature disable feature => [FEATURE1, FEATURE2, ...] disable => [FEATURE1, FEATURE2, ...] The default set of tests to run is determined using the start, end, and skip_all methods discussed above. Using those methods, a list of tests is obtained, and it is expected that these will run. The feature and disable options modify the list. If the feature option is included, the tests given in this call will only run if ALL of the features listed are available. If the disable option is included, the tests will be run unless ANY of the features listed are available. skip skip => REASON Skip these tests for the reason given. todo todo => 0/1 Setting this to 1 says that these tests are allowed to fail. They represent a feature that is not yet implemented. If the tests succeed, a message will be printed notifying the developer that the tests are now ready to promote to actual use. SPECIFYING THE TESTS A series of tests can be specified in two different ways. The tests can be written in a very simple string format, or stored as a list. Demonstrating how this can be done is best done by example, so let's say that there is a function (func) which takes two arguments, and returns a single value. Let's say that the expected output (and the actual output) from 3 different sets of arguments is: Input Expected Output Actual Output ----- --------------- ------------- 1,2 a a 3,4 b x 5,6 c c (so in this case, the first and third tests pass, but the 2nd one will fail). 
Specifying these tests as lists could be done as: $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => [ [a], [b], [c] ], ); Here, the tests are stored as a list, and each element in the list is a listref containing the set of arguments. If the func option is not passed in, the tests option is set to a list of results to compare with the expected results, so the following is equivalent to the above: $o->tests( tests => [ [a], [x], [c] ], expected => [ [a], [b], [c] ], ); If an argument (or actual result) or an expected result is only a single value, it can be entered as a scalar instead of a list ref, so the following is also equivalent: $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => [ a, b, [c] ], ); The only exception to this is if the single value is itself a list reference. In this case it MUST be included as a reference. In other words, if you have a single test, and the expected value for this test is a list reference, it must be passed in as: expected => [ [ \@r ] ] NOT as: expected => [ \@r ] Passing in a set of expected results is optional. If none are passed in, the tests are treated as if they had been passed to the 'ok' method (i.e. if they return something true, they pass, otherwise they fail). The second way to specify tests is as a string. The string is a multi-line string with each tests being separate from the next test by a blank line. Comments (lines which begin with '#') are allowed, and are ignored. Whitespace at the start and end of the line is ignored. The string may contain the results directly, or results may be passed in separately. For example, the following all give the same sets of tests as the example above: $o->tests( func => &func, tests => " # Test 1 1 2 => a # Test 2 3 4 => b 5 6 => c ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => [ [a], [b], [c] ] ); $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => " a b c ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => " a b c ", ); The expected results may also consist of only a single set of results (in this case, it must be passed in as a listref). In this case, all of the tests are expected to have the same results. So, the following are equivalent: $o->tests( func => &func, tests => " 1 2 => a b 3 4 => a b 5 6 => a b ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => [ [a, b] ], ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => "a b", ); The number of expected values must either be 1 (i.e. all of the tests are expected to produce the same value) or exactly the same number as the number of tests. The parser is actually quite powerful, and can handle multi-line tests, quoted strings, and nested data structures. The test may be split across any number of lines, provided there is not a completely blank line (which signals the end of the test), so the following are equivalent: tests => "a b c", tests => "a b c", Arguments (or expected results) may include data structures. For example, the following are equivalent: tests => "[ a b ] { a 1 b 2 }" tests => [ [ [a,b], { a=>1, b=>2 } ] ] Whitespace is mostly optional, but there is one exception. 
An item must end with some kind of delimiter, so the following will fail: tests => "[a b][c d]" The first element (the list ref [a b]) must be separated from the second element by the delimiter (which is whitespace in this case), so it must be written as: tests => "[a b] [c d]" As already demonstrated, hashrefs and listrefs may be included and nested. Elements may also be included inside parens, but this is optional since all arguments and expected results are already treated as lists, so the following are equivalent: tests => "a b c" tests => "(a b) c" Although parens are optional, they may make things more readable, and allow you to use something other than whitespace as the delimiter. If the character immediately following the opening paren, brace, or bracket is a punctuation mark, then it is used as the delimiter instead of whitespace. For example, the following are all equivalent: [ a b c ] [a b c] [, a,b,c ] [, a, b, c ] A delimiter is a single character, and the following may not be used as a delimiter: any opening/closing characters () [] {} single or double quotes alphanumeric characters underscore Whitespace (including newlines) around the delimiter is ignored, so the following is valid: [, a, b, c ] Two delimiters next to each other or a trailing delimiter produce an empty string. "(,a,b,)" => (a, b, '') "(,a,,b)" => (a, '', b) Hashrefs may be specified by braces and the following are equivalent: { a 1 b 2 } {, a,1,b,2 } {, a,1,b,2, } Note that a trailing delimiter is ignored if there are already an even number of elements, or an empty string otherwise. Nested structures are allowed: "[ [1 2] [3 4] ]" For example, $o->tests( func => &func, tests => "a [ b c ] { d 1 e 2 } => x y" ); is equivalent to: $o->tests( func => &func, tests => [ [a, [b,c], {d=>1,e=>2}] ], results => [ [x,y] ], ); Any single value can be surrounded by single or double quotes in order to include the delimiter. So: "(, a,'b,c',e )" is equivalent to: "( a b,c e )" Any single value can be the string '__undef__' which will be turned into an actual undef. If the value is '__blank__' it is turned into an empty string (''), though it can also be specified as '' directly. Any value can have an embedded newline by including a __nl__ in the value, but the value must be written on a single line. Expected results are separated from arguments by ' => '. ENVIRONMENT VARIABLES To summarize the information above, the following environment variables (and main:: variables) exist. Each can be set in a perl script as a variable in the main namespace: $::TI_END or as an environment variable: $ENV{TI_END} TI_START Set this to define the test you want to start with. Example: If you have a perl test script (my_test_script) and you want to start running it at test 12, run the following shell commands: TI_START=12 ./my_test_script.t TI_END Set this to define the test you want to end with. TI_TESTNUM Set this to run only a single test TI_QUIET How verbose the test script is. TI_MODE How the output is formatted. TI_WIDTH The width of the terminal. TI_NOCLEAN When running a file test, the temporary output file will not be removed if this is set. HISTORY The history of this module dates back to 1996 when I needed to write a test suite for my Date::Manip module. At that time, none of the Test::* modules currently available in CPAN existed (the earliest ones didn't come along until 1998), so I was left completely on my own writing my test scripts. 
I wrote a very basic version of my test framework which allowed me to write all of the tests as a string, it would parse the string, count the tests, and then run them. Over the years, the functionality I wanted grew, and periodically, I'd go back and reexamine other Test frameworks (primarily Test::More) to see if I could replace my framework with an existing module... and I've always found them wanting, and chosen to extend my existing framework instead. As I've written other modules, I've wanted to use the framework in them too, so I've always just copied it in, but this is obviously tedious and error prone. I'm not sure why it took me so long... but in 2010, I finally decided it was time to rework the framework in a module form. I loosely based my module on Test::More. I like the functionality of that module, and wanted most of it (and I plan on adding more in future versions). So this module uses some similar syntax to Test::More (though it allows a great deal more flexibility in how the tests are specified). One thing to note is that I may have been able to write this module as an extension to Test::More, but after looking into that possibility, I decided that it would be faster to not do that. I did "borrow" a couple of routines from it (though they've been modified quite heavily) as a starting point for a few of the functions in this module, and I thank the authors of Test::More for their work. KNOWN BUGS AND LIMITATIONS None known. SEE ALSO Test::More - the 'industry standard' of perl test frameworks LICENSE This script is free software; you can redistribute it and/or modify it under the same terms as Perl itself. AUTHOR Sullivan Beck (sbeck@cpan.org) Test-Inter-1.06/META.yml0000644000175000001440000000127712466660013014271 0ustar sulbeckusers--- abstract: 'framework for more readable interactive test scripts' author: - 'Sullivan Beck ' build_requires: Module::Build: '0.21' Test::More: '0' configure_requires: Module::Build: '0.21' dynamic_config: 1 generated_by: 'Module::Build version 0.4205, CPAN::Meta::Converter version 2.143240' license: perl meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: '1.4' name: Test-Inter provides: Test::Inter: file: lib/Test/Inter.pm version: '1.06' requires: File::Basename: '0' IO::File: '0' perl: '5.006' resources: license: http://dev.perl.org/licenses/ repository: https://github.com/SBECK-github/Test-Inter version: '1.06' Test-Inter-1.06/internal/0000755000175000001440000000000012404064450014620 5ustar sulbeckusersTest-Inter-1.06/internal/NOTES0000644000175000001440000000043012404064450015430 0ustar sulbeckusers######################################################################### Before every release, spellcheck the documents: for i in `find lib -type f -name \*.pod` ;do podspell $i > $i.txt ispell -p internal/ispell.isp $i.txt rm -f $i.txt $i.txt.bak done Test-Inter-1.06/internal/TODO0000644000175000001440000000245712404064450015320 0ustar sulbeckusersConsider adding the following from Test::More note explain can_ok isa_ok subtest cmp_ok like unlike handles strings match regexps lists contain certain element hashes contain certain keys Add support for input files Look at: Test::Group Test::Slow to see if there's anything I want to add. Look at using Data::PrettyPrintObjects to print out the results. 
Add support for timing tests: ok 1 (12 ms) ok 2 (23 ms) ========================================================================= OLD STUFF # Add a variable TI_TODO # if '' (default) pass even if TODO tests fail # if 'warn', pass but warn about TODO tests # if 'fail', fail if TODO tests fail # Add functions: # like, unlike # cmp_ok # can_ok # isa_ok # new_ok # subtest # pass, fail # is_deeply # explain # Conditional tests # skip(N) skip next N (defaults to 1) tests # todo_skip # ARGS => VALS # true if returned values are itentical to VALS # ARGS =>OP VALS # =>is # =>isnt # =>like # =>unlike # =>cmp(NOP) # NOP : < > <= >= != # =>cmp(N,M) # true if VAL > N and VAL < M # =>cmp[N,M] # true if VAL >= N and VAL <= M # =>cmp(N,M] # =>cmp[N,M) # =>member # true if the return value is any of the VALS Test-Inter-1.06/internal/ispell.isp0000644000175000001440000000010712404064450016623 0ustar sulbeckusersCPAN diag hashrefs isnt listref listrefs ok paren testdir testnum todo Test-Inter-1.06/internal/build.yaml0000644000175000001440000000004212404053065016577 0ustar sulbeckusers--- script: examples: 0 git: 1 Test-Inter-1.06/lib/0000755000175000001440000000000012404053065013552 5ustar sulbeckusersTest-Inter-1.06/lib/Test/0000755000175000001440000000000012404064450014471 5ustar sulbeckusersTest-Inter-1.06/lib/Test/Inter.pm0000644000175000001440000011606212404064450016116 0ustar sulbeckuserspackage Test::Inter; # Copyright (c) 2010-2014 Sullivan Beck. All rights reserved. # This program is free software; you can redistribute it and/or modify it # under the same terms as Perl itself. ############################################################################### require 5.004; use warnings; use strict; use File::Basename; use IO::File; our($VERSION); $VERSION = '1.06'; ############################################################################### # BASE METHODS ############################################################################### sub version { my($self) = @_; return $VERSION; } sub new { my($class,@args) = @_; my($name,%opts); if (@args % 2) { ($name,%opts) = @args; } else { $name = $0; $name =~ s,^\./,,; %opts = @args; } # The basic structure my $self = { 'name' => $name, # the name of the test script 'start' => 1, # the first test to run 'end' => 0, # the last test to end 'plan' => 0, # the number of tests planned 'abort' => 0, # abort on the first failed test 'quiet' => 0, # if 1, no output on successes # (this should only be done when # running as an interactive script) 'mode' => 'test', # mode to run script in 'width' => 80, # width of terminal 'features' => {}, # a list of available features 'skipall' => '', # the reason for skipping all # remaining tests 'plandone' => 0, # 1 if a plan is done 'testsrun' => 0, # 1 if any tests have been run 'libdir' => '', # a directory to load modules from 'testdir' => '', # the test directory }; bless $self, $class; $main::TI_NUM = 0; # Handle options, environment variables, global variables my @opts = qw(start end testnum plan abort quiet mode width skip_all); my %o = map { $_,1 } @opts; no strict 'refs'; foreach my $opt (@opts) { if (! 
exists $o{$opt}) { $self->_die("Invalid option to new method: $opt"); } my $OPT = uc("ti_$opt"); if (exists $opts{opt} || exists $ENV{$OPT} || defined ${ "main::$OPT" }) { my $val; if (defined ${ "main::$OPT" }) { $val = ${ "main::$OPT" }; } elsif (exists $ENV{$OPT}) { $val = $ENV{$OPT}; } else { $val = $opts{$opt}; } &{ "Test::Inter::$opt" }($self,$val); } } if ($$self{'mode'} ne 'test') { print "\nRunning $name...\n"; } # Find the test directory # # Scripts will either be run: # directly (look at $0) # as a test suite (look for ./t and ../t) my($TDIR,@LIBDIR); if (-f "$0") { my $COM = $0; $TDIR = dirname($COM); $TDIR = '.' if (! $TDIR); } elsif (-d 't') { $TDIR = 't'; } else { $TDIR = '.'; } @LIBDIR = ('.'); if (-d "$TDIR/lib") { push(@LIBDIR,"$TDIR/lib"); } if (-d "$TDIR/../lib") { push(@LIBDIR,"$TDIR/../lib"); } $$self{'testdir'} = $TDIR; $$self{'libdir'} = \@LIBDIR; return $self; } sub testdir { my($self) = @_; return $$self{'testdir'}; } sub start { my($self,$val) = @_; $val = 1 if (! defined($val)); $self->_die("start requires an integer value") if ($val !~ /^\d+$/); $$self{'start'} = $val; } sub end { my($self,$val) = @_; $val = 0 if (! $val); $self->_die("end requires an integer value") if ($val !~ /^\d+$/); $$self{'end'} = $val; } sub testnum { my($self,$val) = @_; if (! defined($val)) { $$self{'start'} = 1; $$self{'end'} = 0; } else { $self->_die("testnum requires an integer value") if ($val !~ /^\d+$/); $$self{'start'} = $$self{'end'} = $val; } } sub plan { my($self,$val) = @_; if ($$self{'plandone'}) { $self->_die('Plan/done_testing included twice'); } $$self{'plandone'} = 1; $val = 0 if (! defined($val)); $self->_die("plan requires an integer value") if ($val !~ /^\d+$/); $$self{'plan'} = $val; if ($val != 0) { $self->_plan($val); } } sub done_testing { my($self,$val) = @_; if ($$self{'plandone'}) { $self->_die('Plan/done_testing included twice'); } $$self{'plandone'} = 1; $val = $main::TI_NUM if (! $val); $self->_die("done_testing requires an integer value") if ($val !~ /^\d+$/); $self->_plan($val); if ($val != $main::TI_NUM) { $self->_die("Ran $main::TI_NUM tests, expected $val"); } } sub abort { my($self,$val) = @_; $val = 0 if (! $val); $$self{'abort'} = $val; } sub quiet { my($self,$val) = @_; $val = 0 if (! $val); $$self{'quiet'} = $val; } sub mode { my($self,$val) = @_; $val = 'test' if (! $val); $$self{'mode'} = $val; } sub width { my($self,$val) = @_; $val = 0 if (! $val); $$self{'width'} = $val; } sub skip_all { my($self,$reason,@features) = @_; if (@features) { my $skip = 0; foreach my $feature (@features) { if (! exists $$self{'features'}{$feature} || ! $$self{'features'}{$feature}) { $skip = 1; $reason = "Required feature ($feature) missing" if (! $reason); last; } } return if (! $skip); } if ($$self{'plandone'} || $$self{'testsrun'}) { $reason = 'Remaining tests skipped' if (! $reason); $$self{'skipall'} = $reason; } else { $reason = 'Test script skipped' if (! 
$reason); $self->_plan(0,$reason); exit 0; } } sub _die { my($self,$message) = @_; print "ERROR: $message\n"; exit 1; } sub feature { my($self,$feature,$val) = @_; $$self{'features'}{$feature} = $val; } sub diag { my($self,$message) = @_; return if ($$self{'quiet'} == 2); $self->_diag($message); } sub note { my($self,$message) = @_; return if ($$self{'quiet'}); $self->_diag($message); } ############################################################################### # LOAD METHODS ############################################################################### # The routines were originally from Test::More (though they have been # changed... some to a greater extent than others). sub require_ok { my($self,$module,$mode) = @_; $mode = '' if (! $mode); $main::TI_NUM++ unless ($mode eq 'feature'); my $pack = caller; my @inc = map { "unshift(\@INC,'$_');\n" } @{ $$self{'libdir'} }; my($desc,$code); if ( $module =~ /^\d+(?:\.\d+)?$/ ) { # A perl version check. $desc = "require perl $module"; $code = <_is_module_name($module); $desc = "require $module"; $code = <_eval($code); chomp($eval_error); my @eval_error = split(/\n/,$eval_error); foreach my $err (@eval_error) { $err =~ s/ \(\@INC contains.*//; # strip out the actual @INC values } my $ok = 1; if ($eval_result) { # Able to load the module if ($mode eq 'forbid') { $$self{'skipall'} = 'Loaded a module not supposed to be present'; $self->_not_ok($desc); $self->_diag('Test required that module not be loadable') unless ($$self{'quiet'} == 2); $ok = 0; } elsif ($mode eq 'feature') { $self->feature($module,1); if (! $$self{'quiet'}) { $self->_diag($desc); $self->_diag("Feature available: $module"); } } else { $self->_ok($desc); } } else { # Unable to load the module if ($mode eq 'forbid') { $self->_ok($desc); } elsif ($mode eq 'feature') { $self->feature($module,0); if (! $$self{'quiet'}) { $self->_diag($desc); $self->_diag("Feature unavailable: $module"); } } else { $$self{'skipall'} = 'Unable to load a required module'; $self->_not_ok($desc); $ok = 0; } } return if ( ($ok && $$self{'quiet'}) || (! $ok && $$self{'quiet'} == 2) ); foreach my $err (@eval_error) { $self->_diag($err); } } sub use_ok { my($self,@args) = @_; my $mode = ''; if ($args[$#args] eq 'forbid' || $args[$#args] eq 'feature') { $mode = pop(@args); } $main::TI_NUM++ unless ($mode eq 'feature'); my $pack = caller; my($code,$desc,$module); if ( @args == 1 and $args[0] =~ /^\d+(?:\.\d+)?$/ ) { # A perl version check. $desc = "require perl $args[0]"; $module = 'perl'; $code = <_is_module_name($module)) { $self->_not_ok("use module: invalid module name: $module"); return; } my $vers = ''; if ( @args and $args[0] =~ /^\d+(?:\.\d+)?$/ ) { $vers = shift(@args); } my $imports = (@args ? 'qw(' . join(' ',@args) . 
')' : ''); $desc = "use $module $vers $imports"; my @inc = map { "unshift(\@INC,'$_');\n" } @{ $$self{'libdir'} }; $code = <_not_ok('use module: no module specified'); return; } $desc .= ' (should not load)' if ($mode eq 'forbid'); $desc .= ' (feature)' if ($mode eq 'feature'); my($eval_result,$eval_error) = $self->_eval($code); chomp($eval_error); my @eval_error = split(/\n/,$eval_error); @eval_error = grep(!/^BEGIN failed--compilation aborted/,@eval_error); foreach my $err (@eval_error) { $err =~ s/ \(\@INC contains.*//; # strip out the actual @INC values } my $ok = 1; if ($eval_result) { # Able to load the module if ($mode eq 'forbid') { $$self{'skipall'} = 'Loaded a module not supposed to be present'; $self->_not_ok($desc); $self->_diag('Test required that module not be usable') unless ($$self{'quiet'} == 2); $ok = 0; } elsif ($mode eq 'feature') { $self->feature($module,1); if (! $$self{'quiet'}) { $self->_diag($desc); $self->_diag("Feature available: $module"); } } else { $self->_ok($desc); } } else { # Unable to load the module if ($mode eq 'forbid') { $self->_ok($desc); } elsif ($mode eq 'feature') { $self->feature($module,0); if (! $$self{'quiet'}) { $self->_diag($desc); $self->_diag("Feature unavailable: $module"); } } else { $$self{'skipall'} = 'Unable to load a required module'; $self->_not_ok($desc); $ok = 0; } } return if ( ($ok && $$self{'quiet'}) || (! $ok && $$self{'quiet'} == 2) ); foreach my $err (@eval_error) { $self->_diag($err); } } sub _is_module_name { my($self,$module) = @_; # Module names start with a letter. # End with an alphanumeric. # The rest is an alphanumeric or :: $module =~ s/\b::\b//g; return $module =~ /^[a-zA-Z]\w*$/ ? 1 : 0; } sub _eval { my($self,$code) = @_; my( $sigdie, $eval_result, $eval_error ); { local( $@, $!, $SIG{__DIE__} ); # isolate eval $eval_result = eval $code; $eval_error = $@; $sigdie = $SIG{__DIE__} || undef; } # make sure that $code got a chance to set $SIG{__DIE__} $SIG{__DIE__} = $sigdie if defined $sigdie; return( $eval_result, $eval_error ); } ############################################################################### # OK/IS/ISNT METHODS ############################################################################### sub ok { my($self,@args) = @_; $main::TI_NUM++; my($op,@ret) = $self->_ok_result(@args); my($name,@diag); my $ok = 1; if ($op eq 'skip') { my $reason = shift(@ret); $self->_skip($reason); } elsif ($op eq 'pass') { ($name,@diag) = @ret; $self->_ok($name); } else { ($name,@diag) = @ret; $self->_not_ok($name); $ok = 0; } return if ( ($ok && $$self{'quiet'}) || (! 
$ok && $$self{'quiet'} == 2) ); foreach my $diag (@diag) { $self->_diag($diag); } } sub _ok_result { my($self,@args) = @_; # Test if we're skipping this test my($skip,$reason) = $self->_skip_test(); return ('skip',$reason) if ($skip); # No args == always pass if (@args == 0) { return ('pass','Empty test'); } # Get the result my($func,$funcargs,$result) = $self->_get_result(\@args); # Get name/expected my($name,$expected); if (@args == 1) { $name = $args[0]; } elsif (@args == 2) { ($expected,$name) = @args; } elsif (@args > 2) { return(0,'','Improperly formed test: too many arguments'); } # Check the result my($pass,@diag) = $self->_cmp_result('ok',$func,$funcargs,$result,$expected); return($pass,$name,@diag); } sub is { my($self,@args) = @_; $self->_is("is",@args); } sub isnt { my($self,@args) = @_; $self->_is("isnt",@args); } sub _is { my($self,$is,@args) = @_; $main::TI_NUM++; my($op,@ret) = $self->_is_result($is,@args); my($name,@diag); my $ok = 1; if ($op eq 'skip') { my $reason = shift(@ret); $self->_skip($reason); } elsif ($op eq 'pass') { ($name,@diag) = @ret; $self->_ok($name); } else { ($name,@diag) = @ret; $self->_not_ok($name); $ok = 0; } return if ( ($ok && $$self{'quiet'}) || (! $ok && $$self{'quiet'} == 2) ); foreach my $diag (@diag) { $self->_diag($diag); } } sub _is_result { my($self,$is,@args) = @_; # Test if we're skipping this test my($skip,$reason) = $self->_skip_test(); return ('skip',$reason) if ($skip); # Test args if (@args < 2) { return ('fail','','Improperly formed test: too few arguments'); } my($func,$funcargs,$result) = $self->_get_result(\@args); my($name,$expected); if (@args == 1) { ($expected) = @args; } elsif (@args == 2) { ($expected,$name) = @args; } else { return(0,'','Improperly formed test: too many arguments'); } # Check the result my($pass,@diag) = $self->_cmp_result($is,$func,$funcargs,$result,$expected); return($pass,$name,@diag); } # Returns $func,$args and $results. The first two are returned only if # there is a function. # sub _get_result { my($self,$args) = @_; my($func,@funcargs,@result,$result); if (ref($$args[0]) eq 'CODE') { $func = shift(@$args); if (ref($$args[0]) eq 'ARRAY') { @funcargs = @{ $$args[0] }; shift(@$args); } @result = &$func(@funcargs); return ($func,\@funcargs,\@result); } elsif (ref($$args[0]) eq 'ARRAY') { @result = @{ $$args[0] }; shift(@$args); return ('','',\@result); } else { $result = shift(@$args); return ('','',$result); } } sub _cmp_result { my($self,$type,$func,$funcargs,$result,$expected) = @_; my $pass = 0; my $identical = 0; my @diag; if ($type eq 'ok') { if (ref($result) eq 'ARRAY') { foreach my $ele (@$result) { $pass = 1 if (defined($ele)); } } elsif (ref($result) eq 'HASH') { foreach my $key (keys %$result) { my $val = $$result{$key}; $pass = 1 if (defined($val)); } } else { $pass = ($result ? 1 : 0); } if (! defined($expected)) { # If no expected result passed in, we don't test the results $identical = 1; } else { # Results/expected must be the same structure $identical = $self->_cmp_structure($result,$expected); } } else { $identical = $self->_cmp_structure($result,$expected); if ($type eq 'is') { $pass = $identical; } else { $pass = 1 - $identical; } } if (! $identical && $type ne 'isnt') { if ($func) { push(@diag,"Arguments: " . $self->_stringify($funcargs)); } push(@diag, "Results : " . $self->_stringify($result)); push(@diag, "Expected : " . $self->_stringify($expected)) unless ($type eq 'ok' && ! defined($result)); } return (($pass ? 
'pass' : 'fail'),@diag); } # Turn a data structure into a string (poor-man's Data::Dumper) sub _stringify { my($self,$s) = @_; my($str) = $self->__stringify($s); my($width) = $$self{'width'}; if ($width) { $width -= 21; # The leading string $width = 10 if ($width < 10); $str = substr($str,0,$width) if (length($str)>$width); } return $str; } sub __stringify { my($self,$s) = @_; if (! defined($s)) { return '__undef__'; } elsif (ref($s) eq 'ARRAY') { my $str = '[ '; foreach my $val (@$s) { $str .= $self->__stringify($val) . ' '; } $str .= ']'; return $str; } elsif (ref($s) eq 'HASH') { my $str = '{ '; foreach my $key (sort keys %$s) { my $key = $self->__stringify($key); my $val = $self->__stringify($$s{$key}); $str .= "$key=>$val "; } $str .= '}'; return $str; } elsif (ref($s)) { return '<' . ref($s) . '>'; } elsif ($s eq '') { return "''"; } else { if ($s =~ /\s/) { my $q = qr/\'/; # single quote my $qq = qr/\"/; # double quote if ($s !~ $q) { return "'$s'"; } if ($s !~ $qq) { return '"' . $s . '"'; } return "<$s>"; } else { return $s; } } } sub _cmp_structure { my($self,$s1,$s2) = @_; return 1 if (! defined($s1) && ! defined($s2)); # undef = undef return 0 if (! defined($s1) || ! defined($s2)); # undef != def return 0 if (ref($s1) ne ref($s2)); # must be same type if (ref($s1) eq 'ARRAY') { return 0 if ($#$s1 != $#$s2); # two lists must be the same length foreach (my $i=0; $i<=$#$s1; $i++) { return 0 unless $self->_cmp_structure($$s1[$i],$$s2[$i]); } return 1; } elsif (ref($s1) eq 'HASH') { my @k1 = keys %$s1; my @k2 = keys %$s2; return 0 if ($#k1 != $#k2); # two hashes must be the same length foreach my $key (@k1) { return 0 if (! exists $$s2{$key}); # keys must be the same return 0 unless $self->_cmp_structure($$s1{$key},$$s2{$key}); } return 1; } elsif (ref($s1)) { # Two references (other than ARRAY and HASH are assumed equal. return 1; } else { # Two scalars are compared stringwise return ($s1 eq $s2); } } sub _skip_test { my($self) = @_; if ($$self{'skipall'}) { return (1,$$self{'skipall'}); } elsif ( $main::TI_NUM < $$self{'start'} || ($$self{'end'} && $main::TI_NUM > $$self{'end'}) ) { return (1,'Test not in list of tests specified to run'); } return 0; } ############################################################################### # FILE METHOD ############################################################################### sub file { my($self,$func,$input,$outputdir,$expected,$name,@args) = @_; $name = "" if (! $name); if (! ref($func) eq 'CODE') { $self->_die("file method required a coderef"); } my @funcargs; my $testdir = $$self{'testdir'}; # Input file if ($input) { if (-r $input) { # Nothing } elsif (-r "$testdir/$input") { $input = "$testdir/$input"; } else { $self->_die("Input file not readable: $input"); } push(@funcargs,$input); } # Output file and directory if (! $outputdir) { if (-d $testdir && -w $testdir) { $outputdir = $testdir; } else { $outputdir = "."; } } if ($outputdir) { if (! -d $outputdir || ! -w $outputdir) { $self->_die("Output directory not writable: $outputdir"); } } my $output = "$outputdir/tmp_test_inter"; push(@funcargs,$output); # Expected output if (! $expected) { $self->_die("Expected output file not specified"); } elsif (-r $expected) { # Nothing } elsif (-r "$testdir/$expected") { $expected = "$testdir/$expected"; } else { $self->_die("Expected output file not readable: $expected"); } # Create the temporary output file. &$func(@funcargs,@args); if (! 
-r "$output") { $self->_die("Output file not created"); } # Test each line my $in = new IO::File; $in->open($output); my @out = <$in>; $in->close(); chomp(@out); $in->open($expected); my @exp = <$in>; $in->close(); chomp(@exp); unlink($output) if (! $ENV{'TI_NOCLEAN'}); while (@out < @exp) { push(@out,''); } while (@exp < @out) { push(@exp,''); } for (my $i=0; $i<@out; $i++) { my $line = $i+1; my $n = ($name ? "$name : Line $line" : "Line $line"); $self->_is('is',$out[$i],$exp[$i],$n); } } ############################################################################### # TESTS METHOD ############################################################################### sub tests { my($self,%opts) = @_; # # feature => [ FEATURE, FEATURE, ... ] # disable => [ FEATURE, FEATURE, ... ] # my $skip = ''; if (exists $opts{'feature'}) { foreach my $feature (@{ $opts{'feature'} }) { $skip = "Required feature unavailable: $feature", last if (! exists $$self{'features'}{$feature}); } } if (exists $opts{'disable'} && ! $skip) { foreach my $feature (@{ $opts{'disable'} }) { $skip = "Disabled due to feature being available: $feature", last if (exists $$self{'features'}{$feature}); } } # # name => NAME # skip => REASON # todo => 0/1 # my $name = ''; if (exists $opts{'name'}) { $name = $opts{'name'}; } if (exists $opts{'skip'}) { $skip = $opts{'skip'}; } my $todo = 0; if (exists $opts{'todo'}) { $todo = $opts{'todo'}; } # # tests => STRING OR LISTREF # func => CODEREF # expected => STRING OR LISTREF # # tests if (! exists $opts{'tests'}) { $self->_die("invalid test format: tests required"); } my $tests = $opts{'tests'}; my(%tests,$gotexpected); my($ntest,$nexp); if (ref($tests) eq 'ARRAY') { my @results = @$tests; $ntest = 0; foreach my $result (@results) { $ntest++; $tests{$ntest}{'err'} = 0; if (ref($result) eq 'ARRAY') { $tests{$ntest}{'args'} = $result; } else { $tests{$ntest}{'args'} = [$result]; } } $gotexpected = 0; } else { ($ntest,$gotexpected,%tests) = $self->_parse($tests); $nexp = $ntest if ($gotexpected); } # expected if (exists $opts{'expected'}) { if ($gotexpected) { $self->_die("invalid test format: expected results included twice"); } my $expected = $opts{'expected'}; if (ref($expected) eq 'ARRAY') { my @exp = @$expected; $nexp = 0; foreach my $exp (@exp) { $nexp++; if (ref($exp) eq 'ARRAY') { $tests{$nexp}{'expected'} = $exp; } else { $tests{$nexp}{'expected'} = [$exp]; } } } else { my($g,%t); ($nexp,$g,%t) = $self->_parse($expected); if ($g) { $self->_die("invalid test format: expected results contain '=>'"); } foreach my $t (1..$nexp) { $tests{$t}{'expected'} = $t{$t}{'args'}; } } $gotexpected = 1; } if ($gotexpected && ($nexp != 1 && $nexp != $ntest)) { $self->_die("invalid test format: number expected results differs from number of tests"); } # func my $func; if (exists $opts{'func'}) { $func = $opts{'func'}; if (ref($func) ne 'CODE') { $self->_die("invalid test format: func must be a code reference"); } } # # Compare results # foreach my $t (1..$ntest) { $main::TI_NUM++; if ($skip) { $self->_skip($skip,$name); next; } if ($tests{$t}{'err'}) { $self->_not_ok($name); $self->diag($tests{$t}{'err'}); next; } my($op,@ret); # Test results if ($gotexpected) { # Do an 'is' test my @a = ('is'); push(@a,$func) if ($func); push(@a,$tests{$t}{'args'}); push(@a,($nexp == 1 ? 
$tests{'1'}{'expected'} : $tests{$t}{'expected'})); push(@a,$name); ($op,@ret) = $self->_is_result(@a); } else { # Do an 'ok' test my $result = $tests{$t}{'args'}; if (@$result == 1) { $result = $$result[0]; } ($op,@ret) = $self->_ok_result($result,$name); } # Print it out my($name,@diag); my $ok = 1; if ($op eq 'skip') { my $reason = shift(@ret); $self->_skip($reason); } elsif ($op eq 'pass') { ($name,@diag) = @ret; $self->_ok($name); } else { ($name,@diag) = @ret; $self->_not_ok($name); $ok = 0; } next if ( ($ok && $$self{'quiet'}) || (! $ok && $$self{'quiet'} == 2) ); foreach my $diag (@diag) { $self->_diag($diag); } } } ############################################################################### # TAP METHODS ############################################################################### sub _diag { my($self,$message) = @_; print '#' . ' 'x10 . "$message\n"; } sub _plan { my($self,$n,$reason) = @_; $reason = '' if (! $reason); if ($$self{'mode'} eq 'test') { # Test mode if (! $n) { $reason = '' if (! $reason); print "1..0 # Skipped $reason\n"; return; } print "1..$n\n"; } else { if (! $n) { print " All tests skipped: $reason\n"; } else { print " Epected number of tests: $n\n" unless ($$self{'quiet'}); } } } sub _ok { my($self,$name) = @_; $name = '' if (! $name); $name =~ s/\#//; $$self{'testsrun'} = 1; return if ($$self{'mode'} ne 'test' && $$self{'quiet'}); print "ok $main::TI_NUM" . ' 'x(8-length($main::TI_NUM)) . "$name\n"; if ($name =~ /^\d/ && $$self{'quiet'} != 2) { $self->_diag('It is strongly recommended that the name of a test not'); $self->_diag('begin with a digit so it will not be confused with the'); $self->_diag('test number.'); } } sub _not_ok { my($self,$name) = @_; $name = '' if (! $name); $name =~ s/\#//; $$self{'testsrun'} = 1; print "not ok $main::TI_NUM" . ' 'x(4-length($main::TI_NUM)) . "$name\n"; if ($$self{'abort'} == 2) { exit 1; } elsif ($$self{'abort'}) { $$self{'skipall'} = 'Tests aborted due to failed test'; } } sub _skip { my($self,$reason,$name) = @_; $name = '' if (! $name); $name =~ s/\#//; $$self{'testsrun'} = 1; return if ($$self{'mode'} ne 'test' && $$self{'quiet'}); print "ok $main::TI_NUM" . ' 'x(8-length($main::TI_NUM)) . ($name ? "$name " : "") . "# SKIPPED $reason\n"; } ############################################################################### # TEST PARSING METHODS ############################################################################### { my $l; # current line number my $sp_opt = qr/\s*/; # optional whitespace my $sp = qr/\s+/; # required whitespace my $lparen = qr/\(/; # opening paren my $lbrack = qr/\[/; # opening brack my $lbrace = qr/\{/; # opening brace my $q = qr/\'/; # single quote my $qq = qr/\"/; # double quote my $token = qr/\S+/; # a token of non-whitespace characters my $min_str = qr/.*?/; # a minimum length string my $results = qr/=>/; # the string to switch to results # We'll also need to match delimiters and other special characters that # signal the end of a token. The default delimiter is just whitespace, # both other end-of-token regular expressions will include closing # parens, delimiters, etc. # # The end-of-token regexp will return a match for a special character (if # any) that terminates the token. If a token ends a whitespace or EOL, # nothing is matched. # my $eot = qr/()(?:\s+|$)/; # Allowed delimiters is anything except () [] {} alphanumeric, # underscore, and whitespace. 
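   # For example (illustrative only): in a test string written as
   # "(, a,b,c )" the delimiter is ',', while "[a b c]" simply uses
   # whitespace between elements.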
# my $delim = qr/[^\'\"\(\)\[\]\{\}a-zA-Z0-9_ \t]/; # This takes a string which may contain a partial or complete # descritpion of any number of tests, and parses it. # # The string is multiline, and tests must be separated from each other # by one or more blank lines. Lines starting with a pound sign (#) # are comments. # # A test may include arguments (or obtained results), expected results, # or both. # # Returns # ($n,$gotboth,%tests) # where # $n is the number of tests # $gotboth is 1 if both arguments and expected results are obtained # $tests{$i} is the i'th test. # sub _parse { my($self,$string) = @_; my $t = 0; my $gotboth = -1; my %tests = (); # Split on newlines $string = [ split(/\n/s,$string) ]; $t = 0; while (@$string) { my $test = $self->_next_test($string); last if (! @$test); # All tests must contain both args/results OR only one of them. my ($err,$both,$args,$results) = $self->_parse_test($test); if ($gotboth == -1) { $gotboth = $both; } elsif ($gotboth != $both) { $err = "Malformed test [$l]: expected results for some tests, not others"; } $t++; $tests{$t}{'err'} = $err; $tests{$t}{'args'} = $args; $tests{$t}{'expected'} = $results if ($gotboth); } return ($t,$gotboth,%tests); } # Get all lines up to the end of lines or a blank line. Both # signal the end of a test. # sub _next_test { my($self,$list) = @_; my @test; my $started = 0; while (1) { last if (! @$list); my $line = shift(@$list); $line =~ s/^\s*//; $line =~ s/\s*$//; # If it's a blank line, add it to the test. If we've # already done test lines, then this signals the end # of the test. Otherwise, this is before the test, # so keep looking. if ($line eq '') { push(@test,$line); next if (! $started); last; } # Comments are added to the test as a blank line. if ($line =~ /^#/) { push(@test,''); next; } push(@test,$line); $started = 1; } return [] if (! $started); return \@test; } # Parse an entire test. Look for arguments, =>, and expected results. # sub _parse_test { my($self,$test) = @_; my($err,$both,@args,@results); my $curr = 'args'; while (@$test) { last if (! $self->_test_line($test)); # Check for '=>' if ($self->_parse_begin_results($test)) { if ($curr eq 'args') { $curr = 'results'; } else { return ("Malformed test [$l]: '=>' found twice"); } next; } # Get the next item(s) to add. my($err,$match,@val) = $self->_parse_token($test,$eot); return ($err) if ($err); if ($curr eq 'args') { push(@args,@val); } else { push(@results,@val); } } $both = ($curr eq 'results' ? 1 : 0); return ("",$both,\@args,\@results); } # Makes sure that the first line in the test contains # something. Blank lines are ignored. # sub _test_line { my($self,$test) = @_; while (@$test && (! defined($$test[0]) || $$test[0] eq '')) { shift(@$test); $l++; next; } return 1 if (@$test); return 0; } # Check for '=>'. # # Return 1 if found, 0 otherwise. # sub _parse_begin_results { my($self,$test) = @_; return 1 if ($$test[0] =~ s/^ $sp_opt $results $eot //x); return 0; } # Gets the next item to add to the current list. # # Returns ($err,$match,@val) where $match is the character that # matched the end of the current element (either a delimiter, # closing character, or nothing if the element ends on # whitespace/newline). # sub _parse_token { my($self,$test,$EOT) = @_; my($err,$found,$match,@val); { last if (! 
$self->_test_line($test)); # Check for quoted ($err,$found,$match,@val) = $self->_parse_quoted($test,$EOT); last if ($found || $err); # Check for open ($err,$found,$match,@val) = $self->_parse_open_close($test,$EOT,$lparen,')'); last if ($found || $err); ($err,$found,$match,@val) = $self->_parse_open_close($test,$EOT,$lbrack,']'); last if ($err); if ($found) { @val = ( [@val] ); last; } ($err,$found,$match,@val) = $self->_parse_open_close($test,$EOT,$lbrace,'}'); last if ($err); if ($found) { if (@val % 2 == 0) { # Even number of elements @val = ( {@val} ); } elsif (! defined $val[$#val] || $val[$#val] eq '') { # Odd number of elements with nothing as the # last element. pop(@val); @val = ( {@val} ); } else { # Odd number of elements not supported for a hash $err = "Malformed test [$l]: hash with odd number of elements"; } last; } # Check for some other token ($err,$found,$match,@val) = $self->_parse_simple_token($test,$EOT); last if ($err); last; } return ($err) if ($err); return ("Malformed test: unable to parse") if (! $found); foreach my $v (@val) { $v = '' if ($v eq '__blank__'); $v = undef if ($v eq '__undef__'); $v =~ s/__nl__/\n/g if ($v); } return (0,$match,@val) if ($found); return (0,0); } ### ### The next few routines parse parts of the test. Each of them ### take as arguments: ### ### $test : the listref containing the unparsed portion of ### the test ### $EOT : the end of a token ### ### + other args as needed. ### ### They all return: ### ### $err : a string containing the error (if any) ### $found : 1 if something matched ### $match : the character which terminates the current ### token signaling the start of the next token ### (this will either be a delimiter, a closing ### character, or nothing if the string ended on ### whitespace or a newline) ### @val : the value (or values) of the token ### # Check for a quoted string # 'STRING' # "STRING" # The string must be on one line, and everything up to the # closing quote is included (the quotes themselves are # stripped). # sub _parse_quoted { my($self,$test,$EOT) = @_; if ($$test[0] =~ s/^ $sp_opt $q ($min_str) $q $EOT//x || $$test[0] =~ s/^ $sp_opt $qq ($min_str) $qq $EOT//x) { return (0,1,$2,$1); } elsif ($$test[0] =~ /^ $sp_opt $q/x || $$test[0] =~ /^ $sp_opt $qq/x) { return ("Malformed test [$l]: improper quoting"); } return (0,0); } # Parses an open/close section. # # ( TOKEN TOKEN ... ) # (, TOKEN, TOKEN, ... ) # # $open is a regular expression matching the open, $close is the # actual closing character. # # After the closing character must be an $EOT. # sub _parse_open_close { my($self,$test,$EOT,$open,$close) = @_; # See if there is an open my($del,$newEOT); if ($$test[0] =~ s/^ $sp_opt $open ($delim) $sp_opt //x) { $del = $1; $newEOT = qr/ $sp_opt ($|\Q$del\E|\Q$close\E) /x; } elsif ($$test[0] =~ s/^ $sp_opt $open $sp_opt //x) { $del = ''; $newEOT = qr/ ($sp_opt $|$sp_opt \Q$close\E|$sp) /x; } else { return (0,0); } # If there was, then we need to read tokens until either: # the string is all used up => error # $close is found my($match,@val); while (1) { # Get a token. We MUST find something valid even if it is # an empty list followed by the closing character. my($e,$m,@v) = $self->_parse_token($test,$newEOT); return ($e) if ($e); $m =~ s/^$sp//; # If we ended on nothing, and $del is something, then we # ended on a newline with no delimiter. The next line MUST # start with a delimiter or close character or the test is # invalid. if (! $m && $del) { if (! 
$self->_test_line($test)) { return ("Malformed test [$l]: premature end of test"); } if ($$test[0] =~ s/^ $sp_opt $newEOT //x) { $m = $1; } else { return ("Malformed test [$l]: unexpected token (expected '$close' or '$del')"); } } # Figure out what value(s) were returned if ($m eq $close && ! @v) { push(@val,''); } else { push(@val,@v); } last if ($m eq $close); } # Now we need to find out what character ends this token: if ($$test[0] eq '') { # Ended at EOL return (0,1,'',@val); } if ($$test[0] =~ s/^ $sp_opt $EOT //x) { return (0,1,$1,@val); } else { return ("Malformed test [$l]: unexpected token"); } } # Checks for a simple token. # sub _parse_simple_token { my($self,$test,$EOT) = @_; $$test[0] =~ s/^ $sp_opt (.*?) $EOT//x; return (0,1,$2,$1); } } 1; # Local Variables: # mode: cperl # indent-tabs-mode: nil # cperl-indent-level: 3 # cperl-continued-statement-offset: 2 # cperl-continued-brace-offset: 0 # cperl-brace-offset: 0 # cperl-brace-imaginary-offset: 0 # cperl-label-offset: 0 # End: Test-Inter-1.06/lib/Test/Inter.pod0000644000175000001440000010643312404064450016265 0ustar sulbeckusers# Copyright (c) 2010-2014 Sullivan Beck. All rights reserved. # This program is free software; you can redistribute it and/or modify it # under the same terms as Perl itself. =pod =head1 NAME Test::Inter - framework for more readable interactive test scripts =head1 DESCRIPTION This is another framework for writing test scripts. Some of the syntax is loosely inspired by Test::More, and has most of it's functionality, but it is not a drop-in replacement. Test::More (and other existing test frameworks) suffer from two weaknesses, both of which have prevented me from ever using them: None offer the ability to access specific tests in a reasonably interactive fashion None offer the ability to write the tests in whatever format would make the tests the most readable The way I write and use test scripts, existing Test::* modules are not nearly as useful as they could be. Test scripts written using Test::More work fine when running as part of the test suite, but debugging an individual test requires extra steps, and the tests themselves are not as readable as they should be. I do most of my debugging using test scripts. When I find a bug, I write a test case for it, debug it using the test script, and then leave the test there so the bug won't come back (hopefully). Since I use test scripts in two ways (part of a standard test suite and to run the scripts in some interactive way to debug problems), I want to be able to do the following trivially: =over 4 =item B If I'm running the test script interactively (perhaps in the debugger), there are several common functions that I want to have available, including: Run only a single test, or a subset of tests Set a breakpoint in the debugger to run up to the start of the Nth test =item B When running a test script as part of a test suite, the pass/fail status is really the only thing of interest. You just want to know if the module passes all the tests. When running interactively, additional information may allow me to quickly track down the problem without even resorting to a debugger. If a test fails, I almost always want to see why it failed if I'm running it interactively. If reasonable, I want to see a list of what was input, what was output, and what was expected. =back The other feature that I wanted in a test suite is the ability to define the tests in a format that is natural and readable FOR THE TESTS. 
In almost every case, it is best to think of a test script as consisting of two separate parts: a script part, and a test part. The script part of a test script is the least important part! It's usually fairly trivial, rarely needs to be changed, and is not the focus of the test script. The tests part of the script IS the important part, and these should be expressed in a form that is natural to them, easy to maintain, easy to read, and easy to modify, and none of these should involve modifying the script portion of the test script in general. Because the content of the tests is the important part of the script, the emphasis should be in making them more readable, even at the expense of the script portion. As a general rule, if the script portion of the test script obscures the tests in any way, it's not written correctly! The solution to this is well understood, and is common to many other systems where you are mixing two "languages". The task of correctly specifying both the tests and the test script is virtually identical to the task of creating a PHP script which consists of a mixture of PHP and HTML, or the task of creating a template file using some templating system where the file consists of a mixture of text to be displayed and templating commands. It is well understood in each of these cases that the more the two "languages" are interwoven, the less readable both are, and the harder it is to maintain. The more you are able to separate the two, the easier both are to read and maintain. As often as possible, I want the tests to be written in some sort of text format which can be easily read as a table with no perl commands interspersed. I want to the freedom to define the tests in one section (a long string, the DATA section, or even in a separate file) which is easily readable. This may introduce the necessity of parsing it, but it makes it significantly easier to maintain the tests. This flexibility makes it much easier to read the tests (as opposed to the script) which is the fundamental content of a test script. To illustrate some of this, in Test::More, a series of tests might be specified as: # test 1 $result = func("apples","bushels"); is($result, "enough"); # test 2 $result = func("grapefruit","tons"); is($result, "enough"); # test 3 $result = func("oranges","boatloads"); is($result, "insufficient"); Thinking about the features I want that I listed above, there are several difficulties with this. =over 4 =item B Say you ran the test suite, and test 3 failed. To debug it using a traditional Test::* module, you have to open up the test script, find the 3rd test (which won't necessarily be trivial, especially if you're talking about the 103rd test), and then run the debugger setting a break point at that line number. This sequence of steps is typically not very hard (especially when the test script is as simple as the example above), but it's still lots of steps that serve only to break your train of thought. How much better to be able to set a break point in the function that actually performs the test for the Nth test. It would also be nice to be able to skip the first two tests... perhaps they take a long time to run, and I want to get right to work on test 3. You can do this easily too by setting the $::TI_START variable. =item B It's difficult to read the tests individually in this script because there is too much perl code among them, and virtually impossible to look at them as a whole. It is true that looking at this particular example, it is very simple... 
but the script ISN'T the content you're interested in (and bear in mind
that many test scripts are nowhere near this simple). The REAL content of
this script is the tests, which consist of the function arguments and the
expected result. Although it's not impossible to see each of these in the
script above, it's not in a format that is conducive to studying the
tests, and especially not for examining the list of tests as a whole.

=back

Now, look at an alternate way of specifying the tests using this module:

  $tests = "
    apples      bushels   => enough

    grapefruit  tons      => enough

    oranges     boatloads => insufficient
  ";

  $o->tests(tests => $tests,
            func  => \&func);

Here, it's easy to see the list of tests, and adding additional tests is
a breeze. This module supports a number of methods for defining tests, so
you can use whichever one is most convenient (including methods that are
identical to Test::More if that really is the best method).

In addition, the following debugger command works as desired:

  b func ($::TI_NUM==3)

and you're ready to debug (assuming that the test function is named 'func').

=head1 CREATING A TEST

Every test may have several pieces of information:

=over 4

=item B<Name>

Every test is automatically assigned a number, but it may be useful to
specify a name of a test (which is actually a short description of the
test). Whenever a test result is reported, the name will be given (if one
was specified).

The name may not have a '#' in it.

The name is completely optional, but makes the results more readable.

=item B<Expected results>

In order to test something, you need to know what result was expected (or
in some cases, what result was NOT expected).

=item B<Results>

You also need to know the results that you're comparing to the expected
results. This can be obtained by simply working with a set of results, or
a function name and a set of arguments to pass to it.

=item B<Conditions>

It is useful to be able to specify state information at the start of the
test suite (for example, to see if certain features are available), and
some tests may only run if those conditions are met.

If no conditions are set for a test, it will always run.

=item B<Todo tests>

Some tests may be marked as 'todo' tests. These are tests which are
allowed to fail (meaning that they have been put in place for an as-yet
unimplemented feature). Since it is expected that the test will fail, the
test suite will still pass, even if these tests fail.

The tests will still run and if they pass, a message is issued saying
that the feature is now implemented, and the tests should be graduated to
non-todo state.

=back

=head1 BASE METHODS

=over 4

=item B<new>

  $o = new Test::Inter [$name] [%options];

This creates a new test framework. There are several options which may be
used to specify which tests are run, how they are run, and what output is
given.

The entire test script can be named by passing in $name. Options can be
passed in as a hash of ($opt,$val) pairs.

Options can be set in four different ways. First, you can pass in an
($opt,$val) pair in the new method. Second, you can set an environment
variable (which overrides any value passed to the new method). Third, you
can set a global variable (which overrides both the environment variable
and options passed to the new method). Fourth, you can call the
appropriate method to set the option. This overrides all other methods.

Each of the allowed options is described below in the following base
methods:

  start
  end
  testnum
  plan
  abort
  quiet
  mode
  skip_all
  width

=item B<version>

  $o->version();

Returns the version of the module.
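For example (an illustrative sketch; the script name is made up), the
version can be reported as an optional note at the start of a script:

  $o = new Test::Inter 'sample script';
  $o->note("Running with Test::Inter version " . $o->version());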
=item B $o = new Test::Inter 'start' => $N; $o->start($N) To define which test you want to start with, pass in an ($opt,$val) pair of ('start',N), set an environment variable TI_START=N, or a global variable $::TI_START=N. When the start test is defined, most tests numbered less than N are completely ignored. If the tests are being run quietly (see the quiet method below), nothing is printed out for these tests. Otherwise, a skip message is printed out. One class of tests IS still executed. Tests run using the require_ok or use_ok methods (to test the loading of modules) are still run. If no value (or a value of 0) is used, it defaults to the first test. =item B $o = new Test::Inter 'end' => $M; $o->end($M); To define which test you want to end with, pass in an ($opt,$val) pair of ('end',M), set an environment variable TI_END=M, or set a global variable $::TI_END=M. When the end test is defined, all tests numbered more than M are completely ignored. If the tests are being run quietly (see the quiet method below), nothing is printed out for these tests. Otherwise, a skip message is printed out. If no value is given, it defaults to 0 (which means that all remaining tests are run). =item B $o = new Test::Inter 'testnum' => $N; $o->testnum($N); This is used to run only a single test. It is equivalent to setting both the start and end tests to $N. =item B =item B $o = new Test::Inter 'plan' => $N; $o->plan($n); $o->done_testing(); $o->done_testing($n); The TAP API (the 'language' used to run a sequence of tests and see which ones failed and which ones passed) requires a statement of the number of tests that are expected to run. This statement can appear at the start of the test suite, or at the end. If you know in advance how many tests should run in the test script, you can pass in a non-zero integer in a ('plan',N) pair to the new method, or set the TI_PLAN environment variable or the $::TI_PLAN global variable, or call the plan method. If you know how many tests should run at the end of the test script, you can pass in a non-zero integer to the done_testing method. Frequently, you don't really care how many tests are in the script (especially if new tests are added on a regular basis). In this case, you still need to include a statement that says that the number of tests expected is however many were run. To do this, call the done_testing method with no argument. NOTE: if the plan method is used, it MUST be used before any tests are run (including those that test the loading of modules). If the done_testing method is used, it MUST be called after all tests are run. You must specify a plan or use a done_testing statement, but you cannot do both. It is NOT strictly required to set a plan if the script is only run interactively, so if for some reason this module is used for test scripts which are not part of a standard perl test suite, the plan and done_testing statements are optional. As a matter of fact, the script will run just fine without them... but a perl installer will report a failure in the test suite. =item B $o = new Test::Inter 'abort' => 0/1/2; $o->abort(0/1/2); The abort option can be set using an ('abort',0/1/2) option pair, or by setting the TI_ABORT environment variable, or the $::TI_ABORT global variable. If this is set to 1, the test script will run unmodified until a test fails. At that point, all remaining tests will be skipped. If it is set to 2, the test script will run until a test fails at which point it will exit with an error code of 1. 
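For example (an illustrative command line; the script name is
hypothetical), setting the environment variable makes an interactive run
stop as soon as something fails:

  TI_ABORT=2 ./my_test_script.t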
In both cases, todo tests will NOT trigger the abort behavior.

=item B<quiet>

  $o = new Test::Inter 'quiet' => 0/1/2;
  $o->quiet(0/1/2);

The quiet option can be set using a ('quiet',0/1/2) option pair, or by
setting the TI_QUIET environment variable, or the $::TI_QUIET global
variable.

If this is set to 0 (the default), all information will be printed out.
If it is set to 1, some optional information will not be printed. If it
is set to 2, all optional information will not be printed.

=item B<mode>

  $o = new Test::Inter 'mode' => MODE;
  $o->mode(MODE);

The mode option can be set using a ('mode',MODE) option pair, or by
setting the TI_MODE environment variable, or the $::TI_MODE global
variable.

Currently, MODE can be 'test' or 'inter' meaning that the script is run
as part of a test suite, or interactively. When run in test mode, it
prints out the results using the TAP grammar (i.e. 'ok 1', 'not ok 3',
etc.). When run in interactive mode, it prints out results in a more
human readable format.

=item B<width>

  $o = new Test::Inter 'width' => WIDTH;
  $o->width(WIDTH);

The width option can be set using a ('width',WIDTH) option pair, or by
setting the TI_WIDTH environment variable, or the $::TI_WIDTH global
variable.

WIDTH is the width of the terminal (for printing out failed test
information). It defaults to 80, but it can be set to any width (and
lines longer than this are truncated). If WIDTH is set to 0, no
truncation is done.

=item B<skip_all>

  $o = new Test::Inter 'skip_all' => REASON;
  $o->skip_all(REASON);

The skip_all option can be set using a ('skip_all',REASON) option pair,
or by setting the TI_SKIP_ALL environment variable, or the
$::TI_SKIP_ALL global variable.

If this is set, the entire test script will be skipped for the reason
given. This must be done before any test is run, and before any plan
number is set.

The skip_all method can also be called at any point during the script
(i.e. after tests have been run). In this case, all remaining tests will
be skipped.

  $o->skip_all(REASON,FEATURE,FEATURE,...);
  $o->skip_all('',FEATURE,FEATURE,...);

This will skip all tests (or all remaining tests) unless all features
are available. REASON can be entered as an empty string and the reason
the tests are skipped will be a message about the missing feature.

=item B<feature>

  $o->feature($feature,$val);

This defines a feature. If $val is non-zero, the feature is available.
Otherwise it is not.

=item B<diag>

=item B<note>

  $o->diag($message);
  $o->note($message);

Both of these print an optional message. Messages printed with the note
method are always optional and will be omitted if the quiet option is set
to 1 or 2. Messages printed with the diag method are optional and will
not be printed if the quiet option is set to 2, but they will be printed
if the quiet option is set to 1.

=item B<testdir>

Occasionally, it may be necessary to know the directory where the tests
live (for example, there may be a config or data file in there). This
method will return the directory.

=back

=head1 METHODS FOR LOADING MODULES

Test scripts can load other modules (using either the perl 'use' or
'require' commands). There are three different modes for doing this which
determine how this is done.

=over 4

=item B<Required mode>

By default, this is used to test for a module that is required for all
tests in the test script.

Loading the module is treated as an actual test in the test suite. The
test is to determine whether the module is available and can be loaded.
If it can be loaded, it is, and it is reported as a successful test. If
it cannot be loaded, it is reported as a failed test.
In the result of a failed test, all remaining tests will be skipped automatically (except for other tests which load modules). =item B In feature mode, loading the module is not treated as a test (i.e. it will not print out an 'ok' or 'not ok' line. Instead, it will set a feature (named the same as the module) which can be used to determine whether other tests should run or not. =item B In a few very rare cases, we may want to test for a module but expect that it not be present. This is the exact opposite of the 'required' mode. Successfully loading the module is treated as a test failure. In the event of a failure, all remaining tests will be skipped. =back The methods available are: =over 4 =item B $o->require_ok($module [,$mode]); This is used to load a module using the perl 'require' function. If $mode is not passed in, the default mode (required) is used to test the existence of the module. If $mode is passed in, it must be either the string 'forbid' or 'feature'. If $mode is 'feature', a feature named $module is set if the module was able to be loaded. =item B $o->use_ok(@args [,$mode]); This is used to load a module with 'use', or check a perl version. BEGIN { $o->use_ok('5.010'); } BEGIN { $o->use_ok('Some::Module'); } BEGIN { $o->use_ok('Some::Module',2.05); } BEGIN { $o->use_ok('Some::Module','foo','bar'); } BEGIN { $o->use_ok('Some::Module',2.05,'foo','bar'); } are the same as: use 5.010; use Some::Module; use Some::Module 2.05; use Some::Module qw(foo bar); use Some::Module 2.05 qw(foo bar); Putting the use_ok call in a BEGIN block allows the functions to be imported at compile-time and prototypes are properly honored. You'll also need to load the Test::Inter module, and create the object in a BEGIN block. $mode acts the same as in the require_ok method. =back =head1 METHODS FOR RUNNING TEST There are several methods for running tests. The ok, is, and isnt methods are included for those already comfortable with Test::More and wishing to stick with the same format of test script. The tests method is the suggested method though since it makes use of the full power of this module. =over 4 =item B $o->ok(TESTS); A test run with ok looks at a result, and if it evaluates to 0 (or false), it fails. If it evaluates to non-zero (or true), it passes. These tests do not require you to specify the expected results. If expected results are given, they will be compared against the result received, and if they differ, a diagnostic message will be printed, but the test will still succeed or fail based only on the actual result produced. These tests require a single result and either zero or one expected results. To run a single test, use any of the following: $o->ok(); # always succeeds $o->ok($result); $o->ok($result,$name); $o->ok($result,$expected,$name); $o->ok(\&func); $o->ok(\&func,$name); $o->ok(\&func,$expected,$name); $o->ok(\&func,\@args); $o->ok(\&func,\@args,$name); $o->ok(\&func,\@args,$expected,$name); If $result is a scalar, the test passes if $result is true. If $result is a list reference, and the list is either empty, or the first element is a scalar), the test succeeds if the list contains any values (except for undef). If $result is a hash reference, the test succeeds if the hash contains any key with a value that is not undef. If \&func and \@args are passed in, then $result is generated by passing @args to &func and behaves identically to the calls where $result is passed in. 
If \&func is passed in but no arguments, the function takes no arguments, but still produces a result. $result may be a scalar, list reference, or hash reference. If it is a list reference, the test passes is the list contains any defined values. If it is a hash reference, the test passes if any of the keys contain defined values. If an expected value is passed in and the result does not match it, a diagnostic warning will be printed, even if the test passes. =item B =item B $o->is(TESTS); $o->isnt(TESTS); A test run with B looks at a result and tests to see if it is identical to an expected result. If it is, the test passes. Otherwise it fails. In the case of a failure, a diagnostic message will show what result was actually obtained and what was expected. A test run with B looks at a result and tests to see if the result obtained is different than an expected result. If it is different, the test passes. Otherwise it fails. The is method can be called in any of the following ways: $o->is($result,$expected); $o->is($result,$expected,$name); $o->is(\&func,$expected); $o->is(\&func,$expected,$name); $o->is(\&func,\@args,$expected); $o->is(\&func,\@args,$expected,$name); The B method can be called in exactly the same way. As with the ok method, the result can be a scalar, hashref, or listref. If it is a hashref or listref, the entire structure must match the expected value. =item B $o->tests($opt=>$val, $opt=>$val, ...) The options available are described in the following section. =item B $o->file($func,$input,$outputdir,$expected,$name [,@args]); Sometimes it may be easiest to store the input, output, and expected output from a test in a text file. In this case, each line of output will be treated as a single test, so the output and expected output must match up exactly. $func is a reference to a function which will produce a temporary output file. If $input is specified, it is the name of the input file. If it is empty, no input file will be used. The input file can be fully specified, or it can be relative to the test directory. If $outputdir is passed in, it is the directory where the output file will be written. It can be fully specified, or relative to the test directory. If $outputdir is left blank, the temporary file will be written to the test directory. $expected is the name of a file which contains the expected output. It can be fully specified, or it will be checked for in the test directory. $name is the name of this series of tests. @args are extra arguments to pass to the test function. The function will be called with the arguments: &$func( [$input,] $output,@args); $input is only passed in if it was passed in to this method. If no input file is specified, nothing will be passed to the function. $output is the name of a temporary file where the output will be written to. =back =head1 USING THE TESTS METHOD It is expected that most tests (except for those that load a module) will be run using the tests method called as: $o->tests($opt => $val, $opt => $val, ...); The following options are available: =over 4 =item B name => NAME This sets the name of this set of tests. All tests will be given the same name. =item B =item B =item B In order to specify a series of tests, you have to specify either a function and a list of arguments, or a list of results. Specifying the function and list of arguments can be done using the pair: func => \&FUNCTION tests => TESTS If the func option is not set, tests contains a list of results. A list of expected results may also be given. 
They can be included in the tests => TESTS option or included separately as: expected => RESULTS The way to specify these are covered in the next section SPECIFYING THE TESTS. =item B =item B feature => [FEATURE1, FEATURE2, ...] disable => [FEATURE1, FEATURE2, ...] The default set of tests to run is determined using the start, end, and skip_all methods discussed above. Using those methods, a list of tests is obtained, and it is expected that these will run. The feature and disable options modify the list. If the feature option is included, the tests given in this call will only run if ALL of the features listed are available. If the disable option is included, the tests will be run unless ANY of the features listed are available. =item B skip => REASON Skip these tests for the reason given. =item B todo => 0/1 Setting this to 1 says that these tests are allowed to fail. They represent a feature that is not yet implemented. If the tests succeed, a message will be printed notifying the developer that the tests are now ready to promote to actual use. =back =head1 SPECIFYING THE TESTS A series of tests can be specified in two different ways. The tests can be written in a very simple string format, or stored as a list. Demonstrating how this can be done is best done by example, so let's say that there is a function (func) which takes two arguments, and returns a single value. Let's say that the expected output (and the actual output) from 3 different sets of arguments is: Input Expected Output Actual Output ----- --------------- ------------- 1,2 a a 3,4 b x 5,6 c c (so in this case, the first and third tests pass, but the 2nd one will fail). Specifying these tests as lists could be done as: $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => [ [a], [b], [c] ], ); Here, the tests are stored as a list, and each element in the list is a listref containing the set of arguments. If the func option is not passed in, the tests option is set to a list of results to compare with the expected results, so the following is equivalent to the above: $o->tests( tests => [ [a], [x], [c] ], expected => [ [a], [b], [c] ], ); If an argument (or actual result) or an expected result is only a single value, it can be entered as a scalar instead of a list ref, so the following is also equivalent: $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => [ a, b, [c] ], ); The only exception to this is if the single value is itself a list reference. In this case it MUST be included as a reference. In other words, if you have a single test, and the expected value for this test is a list reference, it must be passed in as: expected => [ [ \@r ] ] NOT as: expected => [ \@r ] Passing in a set of expected results is optional. If none are passed in, the tests are treated as if they had been passed to the 'ok' method (i.e. if they return something true, they pass, otherwise they fail). The second way to specify tests is as a string. The string is a multi-line string with each tests being separate from the next test by a blank line. Comments (lines which begin with '#') are allowed, and are ignored. Whitespace at the start and end of the line is ignored. The string may contain the results directly, or results may be passed in separately. 
For example, the following all give the same sets of tests as the example above: $o->tests( func => &func, tests => " # Test 1 1 2 => a # Test 2 3 4 => b 5 6 => c ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => [ [a], [b], [c] ] ); $o->tests( func => &func, tests => [ [1,2], [3,4], [5,6] ], expected => " a b c ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => " a b c ", ); The expected results may also consist of only a single set of results (in this case, it must be passed in as a listref). In this case, all of the tests are expected to have the same results. So, the following are equivalent: $o->tests( func => &func, tests => " 1 2 => a b 3 4 => a b 5 6 => a b ", ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => [ [a, b] ], ); $o->tests( func => &func, tests => " 1 2 3 4 5 6 ", expected => "a b", ); The number of expected values must either be 1 (i.e. all of the tests are expected to produce the same value) or exactly the same number as the number of tests. The parser is actually quite powerful, and can handle multi-line tests, quoted strings, and nested data structures. The test may be split across any number of lines, provided there is not a completely blank line (which signals the end of the test), so the following are equivalent: tests => "a b c", tests => "a b c", Arguments (or expected results) may include data structures. For example, the following are equivalent: tests => "[ a b ] { a 1 b 2 }" tests => [ [ [a,b], { a=>1, b=>2 } ] ] Whitespace is mostly optional, but there is one exception. An item must end with some kind of delimiter, so the following will fail: tests => "[a b][c d]" The first element (the list ref [a b]) must be separated from the second element by the delimiter (which is whitespace in this case), so it must be written as: tests => "[a b] [c d]" As already demonstrated, hashrefs and listrefs may be included and nested. Elements may also be included inside parens, but this is optional since all arguments and expected results are already treated as lists, so the following are equivalent: tests => "a b c" tests => "(a b) c" Although parens are optional, they may make things more readable, and allow you to use something other than whitespace as the delimiter. If the character immediately following the opening paren, brace, or bracket is a punctuation mark, then it is used as the delimiter instead of whitespace. For example, the following are all equivalent: [ a b c ] [a b c] [, a,b,c ] [, a, b, c ] A delimiter is a single character, and the following may not be used as a delimiter: any opening/closing characters () [] {} single or double quotes alphanumeric characters underscore Whitespace (including newlines) around the delimiter is ignored, so the following is valid: [, a, b, c ] Two delimiters next to each other or a trailing delimiter produce an empty string. "(,a,b,)" => (a, b, '') "(,a,,b)" => (a, '', b) Hashrefs may be specified by braces and the following are equivalent: { a 1 b 2 } {, a,1,b,2 } {, a,1,b,2, } Note that a trailing delimiter is ignored if there are already an even number of elements, or an empty string otherwise. Nested structures are allowed: "[ [1 2] [3 4] ]" For example, $o->tests( func => &func, tests => "a [ b c ] { d 1 e 2 } => x y" ); is equivalent to: $o->tests( func => &func, tests => [ [a, [b,c], {d=>1,e=>2}] ], results => [ [x,y] ], ); Any single value can be surrounded by single or double quotes in order to include the delimiter. 
So: "(, a,'b,c',e )" is equivalent to: "( a b,c e )" Any single value can be the string '__undef__' which will be turned into an actual undef. If the value is '__blank__' it is turned into an empty string (''), though it can also be specified as '' directly. Any value can have an embedded newline by including a __nl__ in the value, but the value must be written on a single line. Expected results are separated from arguments by ' => '. =head1 ENVIRONMENT VARIABLES To summarize the information above, the following environment variables (and main:: variables) exist. Each can be set in a perl script as a variable in the main namespace: $::TI_END or as an environment variable: $ENV{TI_END} =over 4 =item TI_START Set this to define the test you want to start with. Example: If you have a perl test script (my_test_script) and you want to start running it at test 12, run the following shell commands: TI_START=12 ./my_test_script.t =item TI_END Set this to define the test you want to end with. =item TI_TESTNUM Set this to run only a single test =item TI_QUIET How verbose the test script is. =item TI_MODE How the output is formatted. =item TI_WIDTH The width of the terminal. =item TI_NOCLEAN When running a file test, the temporary output file will not be removed if this is set. =back =head1 HISTORY The history of this module dates back to 1996 when I needed to write a test suite for my Date::Manip module. At that time, none of the Test::* modules currently available in CPAN existed (the earliest ones didn't come along until 1998), so I was left completely on my own writing my test scripts. I wrote a very basic version of my test framework which allowed me to write all of the tests as a string, it would parse the string, count the tests, and then run them. Over the years, the functionality I wanted grew, and periodically, I'd go back and reexamine other Test frameworks (primarily Test::More) to see if I could replace my framework with an existing module... and I've always found them wanting, and chosen to extend my existing framework instead. As I've written other modules, I've wanted to use the framework in them too, so I've always just copied it in, but this is obviously tedious and error prone. I'm not sure why it took me so long... but in 2010, I finally decided it was time to rework the framework in a module form. I loosely based my module on Test::More. I like the functionality of that module, and wanted most of it (and I plan on adding more in future versions). So this module uses some similar syntax to Test::More (though it allows a great deal more flexibility in how the tests are specified). One thing to note is that I may have been able to write this module as an extension to Test::More, but after looking into that possibility, I decided that it would be faster to not do that. I did "borrow" a couple of routines from it (though they've been modified quite heavily) as a starting point for a few of the functions in this module, and I thank the authors of Test::More for their work. =head1 KNOWN BUGS AND LIMITATIONS None known. =head1 SEE ALSO Test::More - the 'industry standard' of perl test frameworks =head1 LICENSE This script is free software; you can redistribute it and/or modify it under the same terms as Perl itself. 
=head1 AUTHOR Sullivan Beck (sbeck@cpan.org) =cut Test-Inter-1.06/examples/0000755000175000001440000000000012404053065014622 5ustar sulbeckusersTest-Inter-1.06/examples/ok0000755000175000001440000000254212404053065015164 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; print "The following tests should all succeed\n\n"; $o->ok(); $o->ok( 1 == 1 ); $o->ok( 1 == 1, "Basic test" ); $o->ok( 1 == 1, 1, "Basic test" ); $o->ok( 1 == 1, 2, "Basic test" ); sub func_false { return 0; } sub func_true { return 1; } sub func { my($a,$b) = @_; return $a == $b; } $o->ok( \&func_true ); $o->ok( \&func_true, "True test" ); $o->ok( \&func_true, 1, "True test" ); $o->ok( \&func_true, 2, "True test" ); $o->ok( \&func, [1,1]); $o->ok( \&func, [1,1], "Func test" ); $o->ok( \&func, [1,1], 1, "Func test" ); $o->ok( \&func, [1,1], 2, "Func test" ); $o->ok( [ a,b ], [ a,b ], "List test" ); $o->ok( [ a,b ], [ a,c ], "List test (non-identical)" ); $o->ok( { a => 1, b => 2 }, { a => 1, b => 2 }, "Hash test" ); $o->ok( { a => 1, b => 2 }, { a => 1, b => 3 }, "Hash test (non-identical)" ); print "\nThe following tests should all fail\n\n"; $o->ok( 1 == 2 ); $o->ok( 1 == 2, "Basic test" ); $o->ok( 1 == 2, 1, "Basic test" ); $o->ok( \&func_false ); $o->ok( \&func_false, "False test" ); $o->ok( \&func_false, 1, "False test" ); $o->ok( \&func, [1,2]); $o->ok( \&func, [1,2], "Func test" ); $o->ok( \&func, [1,2], 1, "Func test" ); $o->ok( [], [ a,b ], "List test" ); $o->ok( [undef], [ a,b ], "List test" ); $o->ok( { a => undef }, { a => 1 }, "Hash test" ); Test-Inter-1.06/examples/use_ok0000755000175000001440000000043012404053065016032 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; our($o); BEGIN { print "The first test will fail, all others will be skipped.\n\n"; use Test::Inter; $o = new Test::Inter; } BEGIN { $o->use_ok('Xxx::Yyy'); } $o->ok(); $o->ok( 1 == 1 ); $o->ok( 1 == 2 ); $o->done_testing(); Test-Inter-1.06/examples/tests0000755000175000001440000000204212404053065015710 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; print "The following tests test some improperly formed tests\n\n"; sub func1 { my($a,$b) = @_; if ($a eq 'a' && $b eq 'b') { return 1; } elsif ($a eq 'c' && $b eq 'd') { return 2; } elsif ($a eq 'e' && $b eq 'f') { return 3; } } print "The 2nd one fails with 'expected results for some, not others\n\n"; $o->tests(func => \&func1, tests => "a b => 1 c d e f => 3"); print "\n\nFails with '=>' found twice\n\n"; $o->tests(func => \&func1, tests => "a b => 1 => 1"); print "\n\nFails with odd number of elements in hash\n\n"; $o->tests(func => \&func1, tests => "{ a b c } => 1"); print "\n\nFails with improper quoting\n\n"; $o->tests(func => \&func1, tests => "a 'b => 1"); print "\n\nFails with unable to parse\n\n"; $o->tests(func => \&func1, tests => "(a b c"); print "\n\nFails with unexpected token\n\n"; $o->tests(func => \&func1, tests => "(a b c)(d e) => 1"); Test-Inter-1.06/examples/plan0000755000175000001440000000031112404053065015475 0ustar sulbeckusers#!/usr/bin/perl use strict; use warnings; use Test::Inter; my $o = new Test::Inter; print "The following script will fail due to multiple plans\n\n"; $o->plan(2); $o->plan(5); $o->done_testing(); Test-Inter-1.06/examples/is0000755000175000001440000000112512404053065015162 0ustar sulbeckusers#!/usr/bin/perl use Test::Inter; $o = new Test::Inter; print "The following tests should all succeed\n\n"; $o->is ( [ a,b ], [ a,b ], "List test" ); $o->isnt( [ a,b ], [ a,c ], "List test" ); $o->is ( { a 
=> 1, b => 2 }, { a => 1, b => 2 }, "Hash test" ); $o->isnt( { a => 1, b => 2 }, { a => 1, b => 3 }, "Hash test" ); print "\nThe following tests should all fail\n\n"; $o->isnt( [ a,b ], [ a,b ], "List test" ); $o->is ( [ a,b ], [ a,c ], "List test" ); $o->isnt( { a => 1, b => 2 }, { a => 1, b => 2 }, "Hash test" ); $o->is ( { a => 1, b => 2 }, { a => 1, b => 3 }, "Hash test" ); Test-Inter-1.06/INSTALL0000644000175000001440000000142512466660012014043 0ustar sulbeckusersFor instructions on installing this, or any other perl module in a UNIX environment, please refer to: http://www.cpan.org/modules/INSTALL.html http://learn.perl.org/faq/perlfaq8.html#How-do-I-install-a-module-from-CPAN- For instructions in a Windows environment running ActivePerl, refer to one of the following (depending on your version of perl): http://docs.activestate.com/activeperl/5.6/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.8/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.10/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.12/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.14/faq/ActivePerl-faq2.html http://docs.activestate.com/activeperl/5.16/faq/ActivePerl-faq2.html Test-Inter-1.06/.travis.yml0000644000175000001440000000014112404056420015107 0ustar sulbeckuserslanguage: perl perl: - "5.20" - "5.18" - "5.16" - "5.14" - "5.12" - "5.10" - "5.8" Test-Inter-1.06/MANIFEST0000644000175000001440000000073512466660013014147 0ustar sulbeckusers.travis.yml Build.PL Changes examples/is examples/ok examples/plan examples/tests examples/use_ok INSTALL internal/build.yaml internal/ispell.isp internal/NOTES internal/TODO lib/Test/Inter.pm lib/Test/Inter.pod LICENSE Makefile.PL MANIFEST This list of files META.json README t/file.1.exp t/file.2.exp t/file.2.in t/file.t t/is.t t/ok.t t/pod.t t/pod_coverage.t t/require_ok.t t/runtests t/runtests.bat t/skip_all.t t/tests.t t/use_ok.1.t t/use_ok.2.t t/use_ok.3.t META.yml Test-Inter-1.06/Changes0000644000175000001440000000151512466657760014325 0ustar sulbeckusersWritten by: Sullivan Beck (sbeck@cpan.org) Any suggestions, bug reports, or donations :-) should be sent to me. 1.00 2010-04-29 - Initial release. 1.01 2010-04-29 - Use File::Basename and IO::File to get rid of two unix dependencies. 1.02 2011-06-23 - Added 'width' method. - Updated some tests which did not run correctly with perl 5.015. Renee Baecker 1.03 2011-06-28 - Missed one of the tests that fail with perl 5.015. 1.04 2013-03-18 - The __nl__ substitution wasn't happening. 1.05 2013-03-20 - Fixed the Build.PL/Makefile.PL scripts to not install the example scripts. RT 84091 1.06 2015-02-11 - Fixed some typos. RT 88523. - Changed ChangeLog file to new Changes specification. - Documentation improvments - Put distro on GitHub. Gabor Szabo - Fixed Module::Build requirement. RT 102059. Test-Inter-1.06/LICENSE0000644000175000001440000000015512466660012014016 0ustar sulbeckusersThis module is free software; you can redistribute it and/or modify it under the same terms as Perl itself. 
Test-Inter-1.06/Makefile.PL0000644000175000001440000000310112466660012014755 0ustar sulbeckusers###################################################################### # Makefile.PL for Test::Inter ###################################################################### use strict; use warnings; use 5.006; use ExtUtils::MakeMaker 6.30; my %module_prereq = ( 'File::Basename' => '0', 'IO::File' => '0', ); my %script_prereq = ( ); my %test_prereq = ( 'Test::More' => '0', ); my %config_prereq = ( 'ExtUtils::MakeMaker' => '6.30', ); %module_prereq = (%module_prereq,%script_prereq); my $EU_MM_V = $ExtUtils::MakeMaker::VERSION; $EU_MM_V =~ s/_//g; if ($EU_MM_V < 6.64) { %config_prereq = (%config_prereq,%test_prereq); } WriteMakefile( NAME => "Test::Inter", VERSION => "1.06", ABSTRACT => "framework for more readable interactive test scripts", LICENSE => "perl", ($EU_MM_V >= 6.48 ? (MIN_PERL_VERSION => "5.006") : ()), AUTHOR => "Sullivan Beck (sbeck\@cpan.org)", "dist" => {COMPRESS=>"gzip",SUFFIX=>"gz"}, PL_FILES => {}, PREREQ_PM => \%module_prereq, ($EU_MM_V >= 6.52 ? (CONFIGURE_REQUIRES => \%config_prereq) : ()), ($EU_MM_V >= 6.64 ? (TEST_REQUIRES => \%test_prereq) : ()), ($EU_MM_V > 6.45 ? (META_MERGE => { resources => { repository => { type => "git", web => "https://github.com/SBECK-github/Test-Inter", url => "git://github.com/SBECK-github/Test-Inter.git", } } }) : ()), ); Test-Inter-1.06/Build.PL0000644000175000001440000000333312466660012014306 0ustar sulbeckusers###################################################################### # Build.PL for Test::Inter ###################################################################### use strict; use warnings; use 5.006; use Module::Build 0.21; my %module_prereq = ( 'perl' => '5.006', 'File::Basename' => '0', 'IO::File' => '0', ); my %script_prereq = ( ); my %test_prereq = ( 'Test::More' => '0', ); my %config_prereq = ( 'Module::Build' => '0.21', ); my %build_prereq = %config_prereq; if ($Module::Build::VERSION < 0.4004) { %build_prereq = (%config_prereq,%test_prereq); } my $class = Module::Build->subclass( class => "Module::Build::Custom", code => q { sub ACTION_html { my $self = shift ; $self->SUPER::ACTION_html ( @_ ) if ( $ENV{ MAKE_HTML } ) ; } } ); my $build = $class->new( module_name => "Test::Inter", dist_version => "1.06", dist_abstract => "framework for more readable interactive test scripts", license => "perl", dist_author => "Sullivan Beck ", requires => \%module_prereq, ($Module::Build::VERSION >= 0.4004 ? (test_requires => \%test_prereq) : ()), ($Module::Build::VERSION >= 0.30 ? (configure_requires => \%config_prereq) : ()), build_requires => \%build_prereq, recommends => \%script_prereq, sign => 1, ($Module::Build::VERSION >= 0.28 ? (meta_merge => { resources => { repository => "https://github.com/SBECK-github/Test-Inter", } }) : ()), ); $build->create_build_script;