Text-RecordParser-v1.6.3000755000765000024 012201220566 14753 5ustar00kclarkstaff000000000000Text-RecordParser-v1.6.3/Build.PL000444000765000024 435512201220566 16413 0ustar00kclarkstaff000000000000use strict; use Module::Build; my $class = Module::Build->subclass( class => 'TRP::Builder', code => q* sub ACTION_docs { require 'Pod/Readme.pm'; require 'Pod/Select.pm'; require 'Pod/Markdown.pm'; my $self = shift; my $pod = 'README.pod'; Pod::Select::podselect({ -output => $pod }, 'lib/Text/RecordParser.pm'); my $parser = Pod::Readme->new(); $parser->parse_from_file('README.pod', 'README'); open my $pod_fh, '<', $pod or die "Can't read POD '$pod'"; open my $md_fh , '>', 'README.md' or die "Can't write README.md"; my $md = Pod::Markdown->new; $md->parse_from_filehandle($pod_fh); print $md_fh $md->as_markdown; close $pod_fh; close $md_fh; return $self->SUPER::ACTION_docs; } * ); my $build = $class->new( module_name => 'Text::RecordParser', dist_author => 'Ken Youens-Clark ', dist_version_from => 'lib/Text/RecordParser.pm', add_to_cleanup => [ '$(DISTNAME)-$(VERSION).tar.gz' ], dist_abstract => 'Parse record-oriented data in a text file', license => 'gpl', script_files => ['bin/tablify', 'bin/tabmerge', 'bin/tab2graph'], configure_requires => { 'Module::Build' => 0.40, 'Pod::Markdown' => 0, 'Pod::Readme' => 0, 'Pod::Select' => 0, }, requires => { 'IO::Scalar' => 0, 'Class::Accessor' => 0, 'Readonly' => 0, 'List::Util' => 0, 'List::MoreUtils' => 0, 'Text::Autoformat' => 0, 'version' => 0, }, build_requires => { 'Test::More' => 0, 'Test::Exception' => 0, 'Pod::Readme' => 0, 'Pod::Select' => 0, 'Pod::Markdown' => 0, }, recommends => { 'Readonly::XS' => 0, 'Text::TabularDisplay' => '1.22', 'GraphViz' => 0, }, ); $build->create_build_script; Text-RecordParser-v1.6.3/Changes000444000765000024 716012201220566 16407 0ustar00kclarkstaff0000000000001.6.3 August 9 2012 - Fixed bug in "Build.PL" regarding "podselect," updated Build.PL reqs 1.6.2 August 9 2012 - Made output of 
"tablify -v" prettier using Text::Autoformat 1.6.1 April 30 2013 - Removed a debug statement - Fixed INSTALL 1.6.0 April 30 2013 - Moved to Git repo at github.com:kyclark/text-recordparser.git - Changes to "tablify" - Allow for comment lines - Added more single-letter argument names - Allow for definition of column names 1.5.0 July 21 2010 - Some small changeds to be more defensive about non-existent fields 1.4.0 February 18 2010 - Not sure, but I made a release here 1.3.0 April 20 2009 - Guess record separator based on file extension - Automatically escape single quotes in incoming text for parse_line (resolves RT #34844) - Added "strip-quotes" to tablify (related to RT #40664) - Now more conservative on trimming whitespace from individual fields rather than the whole line (resolves RT #38871) - Added Text::RecordParser::Object (resolves RT #38338) - Added tab2graph 1.2.1 March 6 2006 - Fixed bug in "tablify" that didn't checked definedness of value when printing vertically, added tests and documentation on vertical display 1.1.1 February 21 2006 - Fixed RT bug #17787 submitted by Carl Franks on failed test on Win32 - Fixed RT bug #17788 submitted by Carl Franks on failed test when prereq Text::TabularDisplay is not installed - Fixed off-by-one bug in "--limit" option for "tablify", added test 1.1.0 February 17 2006 - Automatically strip backslash-escaped single quotes (because quotes *must* be escaped to parse) - Added code to "tablify" to display records vertically 1.0.1 February 8 2006 - Fixed a bug in Build.PL that failed to install scripts 1.0.0 December 9 2005 - Fixed bug in "extract" that caused infinite loops when called in a loop context (Sharon Wei) - Added convenience args to "new" ("fs" for "field_separator", "rs" for "record_separator") - Added Text::RecordParser::Tab (convenience class for tab files) - Added back in test for scripts as Text::TabularDisplay 1.21 fixes earlier bug causing tests to fail - Coverted to Module::Build - Now with more test 
coverage! 0.09 August 2 2005 - Just one arg to "new" means a filename - Removed tests that break with bad version of Text::TabularDisplay 0.08 November 16 2004 - Added "tabmerge" script - Fixed bug in "no-headers" parsing that reordered fields 0.07 September 3 2004 - Bug fixes to whitespace parsing 0.06 April 6 2004 - Added ability to split on whitespace by adding support for regexes as the "field_separator" - Added "trim" method to automatically removed leading and trailing whitespace from fields (to better support parsing of whitespace-separated data) - Improved test suites to run outside of traditional "make test" - Added "tablify" script 0.05 December 16 2003 - Added "quotemeta" to field separator argument to "parse_line" to fix error reported by Olaf Weinert 0.04 November 5 2003 - Added "comment" method to identify lines which should be skipped as comments - Altered "fetchrow_array" to croak if it reads a line but can't parse it into fields, also changed to skip empty lines automatically 0.03 June 27 2003 - Cleaned up "extract" a bit - Allow "new" to accept a single argument and treat as "filename" 0.02 May 6 2003 - Added "data" method to allow reading of data from a scalar - Allow "fetchall_hashref" to use a computed field for the key 0.01 Apr 28 2003 - Created initial version Text-RecordParser-v1.6.3/INSTALL000444000765000024 12312201220566 16115 0ustar00kclarkstaff000000000000$ perl Build.PL $ sudo cpanm --installdeps . $ ./Build test $ sudo ./Build install Text-RecordParser-v1.6.3/Makefile.PL000444000765000024 226312201220566 17065 0ustar00kclarkstaff000000000000# Note: this file was auto-generated by Module::Build::Compat version 0.3607 unless (eval "use Module::Build::Compat 0.02; 1" ) { print "This module requires Module::Build to install itself.\n"; require ExtUtils::MakeMaker; my $yn = ExtUtils::MakeMaker::prompt (' Install Module::Build now from CPAN?', 'y'); unless ($yn =~ /^y/i) { die " *** Cannot install without Module::Build. 
Exiting ...\n"; } require Cwd; require File::Spec; require CPAN; # Save this 'cause CPAN will chdir all over the place. my $cwd = Cwd::cwd(); CPAN::Shell->install('Module::Build::Compat'); CPAN::Shell->expand("Module", "Module::Build::Compat")->uptodate or die "Couldn't install Module::Build, giving up.\n"; chdir $cwd or die "Cannot chdir() back to $cwd: $!"; } eval "use Module::Build::Compat 0.02; 1" or die $@; Module::Build::Compat->run_build_pl(args => \@ARGV); my $build_script = 'Build'; $build_script .= '.com' if $^O eq 'VMS'; exit(0) unless(-e $build_script); # cpantesters convention require Module::Build; Module::Build::Compat->write_makefile(build_class => 'Module::Build'); Text-RecordParser-v1.6.3/MANIFEST000444000765000024 146412201220566 16246 0ustar00kclarkstaff000000000000bin/foo.png bin/tab2graph bin/tablify bin/tabmerge Build.PL Changes INSTALL lib/Text/RecordParser.pm lib/Text/RecordParser/Object.pm lib/Text/RecordParser/Tab.pm Makefile.PL MANIFEST This list of files README.md t/00-pipe.t t/01-new.t t/02-filename-fh.t t/03-separator.t t/04-bind.t t/05-fetch.t t/06-filter.t t/07-parse.t t/08-compute.t t/09-comment.t t/10-tablify.t t/11-tabmerge.t t/12-object.t t/13-tab2graph.t t/14-trim.t t/data/bad-file t/data/commented.dat t/data/commented2.dat t/data/empty t/data/merge1.tab t/data/merge2.tab t/data/merge3.tab t/data/numbers.csv t/data/people-no-header.dat t/data/people.dat t/data/pipe.dat t/data/simpsons.alt t/data/simpsons.csv t/data/simpsons.pdd t/data/simpsons.ssv t/data/simpsons.tab t/data/tabular.tab t/data/trim.csv t/pod-coverage.t t/pod.t TODO META.yml META.json Text-RecordParser-v1.6.3/META.json000444000765000024 355112201220566 16535 0ustar00kclarkstaff000000000000{ "abstract" : "Parse record-oriented data in a text file", "author" : [ "Ken Youens-Clark " ], "dynamic_config" : 1, "generated_by" : "Module::Build version 0.4007, CPAN::Meta::Converter version 2.120921", "license" : [ "open_source" ], "meta-spec" : { "url" : 
"http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : "2" }, "name" : "Text-RecordParser", "prereqs" : { "build" : { "requires" : { "Pod::Markdown" : "0", "Pod::Readme" : "0", "Pod::Select" : "0", "Test::Exception" : "0", "Test::More" : "0" } }, "configure" : { "requires" : { "Module::Build" : "0.4", "Pod::Markdown" : "0", "Pod::Readme" : "0", "Pod::Select" : "0" } }, "runtime" : { "recommends" : { "GraphViz" : "0", "Readonly::XS" : "0", "Text::TabularDisplay" : "1.22" }, "requires" : { "Class::Accessor" : "0", "IO::Scalar" : "0", "List::MoreUtils" : "0", "List::Util" : "0", "Readonly" : "0", "Text::Autoformat" : "0", "version" : "0" } } }, "provides" : { "Text::RecordParser" : { "file" : "lib/Text/RecordParser.pm", "version" : "v1.6.3" }, "Text::RecordParser::Object" : { "file" : "lib/Text/RecordParser/Object.pm", "version" : "v1.4.0" }, "Text::RecordParser::Tab" : { "file" : "lib/Text/RecordParser/Tab.pm", "version" : "v1.4.0" } }, "release_status" : "stable", "resources" : { "license" : [ "http://opensource.org/licenses/gpl-license.php" ] }, "version" : "v1.6.3" } Text-RecordParser-v1.6.3/META.yml000444000765000024 211412201220566 16357 0ustar00kclarkstaff000000000000--- abstract: 'Parse record-oriented data in a text file' author: - 'Ken Youens-Clark ' build_requires: Pod::Markdown: 0 Pod::Readme: 0 Pod::Select: 0 Test::Exception: 0 Test::More: 0 configure_requires: Module::Build: 0.4 Pod::Markdown: 0 Pod::Readme: 0 Pod::Select: 0 dynamic_config: 1 generated_by: 'Module::Build version 0.4007, CPAN::Meta::Converter version 2.120921' license: open_source meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: 1.4 name: Text-RecordParser provides: Text::RecordParser: file: lib/Text/RecordParser.pm version: v1.6.3 Text::RecordParser::Object: file: lib/Text/RecordParser/Object.pm version: v1.4.0 Text::RecordParser::Tab: file: lib/Text/RecordParser/Tab.pm version: v1.4.0 recommends: GraphViz: 0 Readonly::XS: 0 Text::TabularDisplay: 
1.22 requires: Class::Accessor: 0 IO::Scalar: 0 List::MoreUtils: 0 List::Util: 0 Readonly: 0 Text::Autoformat: 0 version: 0 resources: license: http://opensource.org/licenses/gpl-license.php version: v1.6.3 Text-RecordParser-v1.6.3/README.md000444000765000024 3102212201220566 16405 0ustar00kclarkstaff000000000000# NAME Text::RecordParser - read record-oriented files # SYNOPSIS use Text::RecordParser; # use default record (\n) and field (,) separators my $p = Text::RecordParser->new( $file ); # or be explicit my $p = Text::RecordParser->new({ filename => $file, field_separator => "\t", }); $p->filename('foo.csv'); # Split records on two newlines $p->record_separator("\n\n"); # Split fields on tabs $p->field_separator("\t"); # Skip lines beginning with hashes $p->comment( qr/^#/ ); # Trim whitespace $p->trim(1); # Use the fields in the first line as column names $p->bind_header; # Get a list of the header fields (in order) my @columns = $p->field_list; # Extract a particular field from the next row my ( $name, $age ) = $p->extract( qw[name age] ); # Return all the fields from the next row my @fields = $p->fetchrow_array; # Define a field alias $p->set_field_alias( name => 'handle' ); # Return all the fields from the next row as a hashref my $record = $p->fetchrow_hashref; print $record->{'name'}; # or print $record->{'handle'}; # Return the record as an object with fields as accessors my $object = $p->fetchrow_object; print $object->name; # or $object->handle; # Get all data as arrayref of arrayrefs my $data = $p->fetchall_arrayref; # Get all data as arrayref of hashrefs my $data = $p->fetchall_arrayref( { Columns => {} } ); # Get all data as hashref of hashrefs my $data = $p->fetchall_hashref('name'); # DESCRIPTION This module is for reading record-oriented data in a delimited text file. 
The most common example have records separated by newlines and fields separated by commas or tabs, but this module aims to provide a consistent interface for handling sequential records in a file however they may be delimited. Typically this data lists the fields in the first line of the file, in which case you should call `bind_header` to bind the field name (or not, and it will be called implicitly). If the first line contains data, you can still bind your own field names via `bind_fields`. Either way, you can then use many methods to get at the data as arrays or hashes. # METHODS ## new This is the object constructor. It takes a hash (or hashref) of arguments. Each argument can also be set through the method of the same name. - filename The path to the file being read. If the filename is passed and the fh is not, then it will open a filehandle on that file and sets `fh` accordingly. - comment A compiled regular expression identifying comment lines that should be skipped. - data The data to read. - fh The filehandle of the file to read. - field\_separator | fs The field separator (default is comma). - record\_separator | rs The record separator (default is newline). - field\_filter A callback applied to all the fields as they are read. - header\_filter A callback applied to the column names. - trim Boolean to enable trimming of leading and trailing whitespace from fields (useful if splitting on whitespace only). See methods for each argument name for more information. Alternately, if you supply a single argument to `new`, it will be treated as the `filename` argument. ## bind\_fields $p->bind_fields( qw[ name rank serial_number ] ); Takes an array of field names and memorizes the field positions for later use. If the input file has no header line but you still wish to retrieve the fields by name (or even if you want to call `bind_header` and then give your own field names), simply pass in the an array of field names you wish to use. 
Pass in an empty array reference to unset: $p->bind_field( [] ); # unsets fields ## bind\_header $p->bind_header; my $name = $p->extract('name'); Takes the fields from the next row under the cursor and assigns the field names to the values. Usually you would call this immediately after opening the file in order to bind the field names in the first row. ## comment $p->comment( qr/^#/ ); # Perl-style comments $p->comment( qr/^--/ ); # SQL-style comments Takes a regex to apply to a record to see if it looks like a comment to skip. ## data $p->data( $string ); $p->data( \$string ); $p->data( @lines ); $p->data( [$line1, $line2, $line3] ); $p->data( IO::File->new('extract( qw[ foo bar baz ] ); Extracts a list of fields out of the last row read. The field names must correspond to the field names bound either via `bind_fields` or `bind_header`. ## fetchrow\_array my @values = $p->fetchrow_array; Reads a row from the file and returns an array or array reference of the fields. ## fetchrow\_hashref my $record = $p->fetchrow_hashref; print "Name = ", $record->{'name'}, "\n"; Reads a line of the file and returns it as a hash reference. The keys of the hashref are the field names bound via `bind_fields` or `bind_header`. If you do not bind fields prior to calling this method, the `bind_header` method will be implicitly called for you. ## fetchrow\_object while ( my $object = $p->fetchrow_object ) { my $id = $object->id; my $name = $object->naem; # <-- this will throw a runtime error } This will return the next data record as a Text::RecordParser::Object object that has read-only accessor methods of the field names and any aliases. This allows you to enforce field names, further helping ensure that your code is reading the input file correctly. That is, if you are using the "fetchrow\_hashref" method to read each line, you may misspell the hash key and introduce a bug in your code. 
With this method, Perl will throw an error if you attempt to read a field not defined in the file's headers. Additionally, any defined field aliases will be created as additional accessor methods. ## fetchall\_arrayref my $records = $p->fetchall_arrayref; for my $record ( @$records ) { print "Name = ", $record->[0], "\n"; } my $records = $p->fetchall_arrayref( { Columns => {} } ); for my $record ( @$records ) { print "Name = ", $record->{'name'}, "\n"; } Like DBI's fetchall\_arrayref, returns an arrayref of arrayrefs. Also accepts optional "{ Columns => {} }" argument to return an arrayref of hashrefs. ## fetchall\_hashref my $records = $p->fetchall_hashref('id'); for my $id ( keys %$records ) { my $record = $records->{ $id }; print "Name = ", $record->{'name'}, "\n"; } Like DBI's fetchall\_hashref, this returns a hash reference of hash references. The keys of the top-level hashref are the field values of the field argument you supply. The field name you supply can be a field created by a `field_compute`. ## fh open my $fh, '<', $file or die $!; $p->fh( $fh ); Gets or sets the filehandle of the file being read. ## field\_compute A callback applied to the fields identified by position (or field name if `bind_fields` or `bind_header` was called). The callback will be passed two arguments: - 1 The current field - 2 A reference to all the other fields, either as an array or hash reference, depending on the method which you called. If data looks like this: parent children Mike Greg,Peter,Bobby Carol Marcia,Jane,Cindy You could split the "children" field into an array reference with the values like so: $p->field_compute( 'children', sub { [ split /,/, shift() ] } ); The field position or name doesn't actually have to exist, which means you could create new, computed fields on-the-fly. 
E.g., if you data looks like this: 1,3,5 32,4,1 9,5,4 You could write a field\_compute like this: $p->field_compute( 3, sub { my ( $cur, $others ) = @_; my $sum; $sum += $_ for @$others; return $sum; } ); Field "3" will be created as the sum of the other fields. This allows you to further write: my $data = $p->fetchall_arrayref; for my $rec ( @$data ) { print "$rec->[0] + $rec->[1] + $rec->[2] = $rec->[3]\n"; } Prints: 1 + 3 + 5 = 9 32 + 4 + 1 = 37 9 + 5 + 4 = 18 ## field\_filter $p->field_filter( sub { $_ = shift; uc(lc($_)) } ); A callback which is applied to each field. The callback will be passed the current value of the field. Whatever is passed back will become the new value of the field. The above example capitalizes field values. To unset the filter, pass in the empty string. ## field\_list $p->bind_fields( qw[ foo bar baz ] ); my @fields = $p->field_list; print join ', ', @fields; # prints "foo, bar, baz" Returns the fields bound via `bind_fields` (or `bind_header`). ## field\_positions my %positions = $p->field_positions; Returns a hash of the fields and their positions bound via `bind_fields` (or `bind_header`). Mostly for internal use. ## field\_separator $p->field_separator("\t"); # splits fields on tabs $p->field_separator('::'); # splits fields on double colons $p->field_separator(qr/\s+/); # splits fields on whitespace my $sep = $p->field_separator; # returns the current separator Gets and sets the token to use as the field delimiter. Regular expressions can be specified using qr//. If not specified, it will take a guess based on the filename extension ("comma" for ".txt," ".dat," or ".csv"; "tab" for ".tab"). The default is a comma. ## filename $p->filename('/path/to/file.dat'); Gets or sets the complete path to the file to be read. If a file is already opened, then the handle on it will be closed and a new one opened on the new file. 
## get\_field\_aliases my @aliases = $p->get_field_aliases('name'); Allows you to define alternate names for fields, e.g., sometimes your input file calls city "town" or "township," sometimes a file uses "Moniker" instead of "name." ## header\_filter $p->header_filter( sub { $_ = shift; s/\s+/_/g; lc $_ } ); A callback applied to column header names. The callback will be passed the current value of the header. Whatever is returned will become the new value of the header. The above example collapses spaces into a single underscore and lowercases the letters. To unset a filter, pass in the empty string. ## record\_separator $p->record_separator("\n//\n"); $p->field_separator("\n"); Gets and sets the token to use as the record separator. The default is a newline ("\\n"). The above example would read a file that looks like this: field1 field2 field3 // data1 data2 data3 // ## set\_field\_alias $p->set_field_alias({ name => 'Moniker,handle', # comma-separated string city => [ qw( town township ) ], # or anonymous arrayref }); Allows you to define alternate names for fields, e.g., sometimes your input file calls city "town" or "township," sometimes a file uses "Moniker" instead of "name." ## trim my $trim_value = $p->trim(1); Provide "true" argument to remove leading and trailing whitespace from fields. Use a "false" argument to disable. # AUTHOR Ken Youens-Clark # SOURCE http://github.com/kyclark/text-recordparser # CREDITS Thanks to the following: - Benjamin Tilly For Text::xSV, the inspirado for this module - Tim Bunce et al. For DBI, from which many of the methods were shamelessly stolen - Tom Aldcroft For contributing code to make it easy to parse whitespace-delimited data - Liya Ren For catching the column-ordering error when parsing with "no-headers" - Sharon Wei For catching bug in `extract` that sets up infinite loops - Lars Thegler For bug report on missing "script\_files" arg in Build.PL # BUGS None known. Please use http://rt.cpan.org/ for reporting bugs. 
# LICENSE AND COPYRIGHT Copyright (C) 2006-10 Ken Youens-Clark. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Text-RecordParser-v1.6.3/TODO000444000765000024 46212201220566 15562 0ustar00kclarkstaff000000000000- Some way to control files with a variable number of columns, e.g. foo,bar,baz,quux flim,flam,flom If I want the above parsed into just three fields, would I want: foo,bar,[baz,quux] flim,flam,[flom] Or just: foo,bar,baz flim,flam,flom How would I specify this in the API? Text-RecordParser-v1.6.3/bin000755000765000024 012201220566 15523 5ustar00kclarkstaff000000000000Text-RecordParser-v1.6.3/bin/foo.png000444000765000024 10142112201220566 17210 0ustar00kclarkstaff000000000000PNG  IHDRGJbKGD IDATxy\NWH -R=kv}_k0vƾdKJaagNBENw]}5:uщB!ҥ !B)BtL !"B@!HǤB!ұLիhnܸÿ{>"#_!Y={6s;)9ȟ?7%Jd"*W~77ӑErx0#GqEקxq O\ɟ??FFFdϞ͎ΘD #44;ƢKQVEԩBZUșXw.?̙+9sgp5?~ >Ŋ#O<ʕ333Ȟ=;dΜY@xx8/^ŋ@py!(]krTOREQ3$&&rum;pMf5ښ:uRreʔ)CѢEɐ@rM_ٳg9v(Ν'>>+hݺ!ŋJNK7>}'8y/nuuUFR(Q Htdd$7o͛p)Ξ=/ȝیMkҬY-5AF2H{_ɓs ?(Q;iٲ%HH=ʖ-[ضm+TZ^~cGe3H"}zW`ݺ`aQf͚ӤIԩC9R}fʔߟ3M^zŴip¢+VBu,Juΐ!C5jFFikѣGqr͉'ڵ#05qғމfͺ_~IE>SNʕRF'_:rtꈝ=k[L:5uرlذR_Gu,dF@v0ot]udό34i]d|g9U s:vAƌYp-ZP)ńȒ%K۷eVKhGck;Xv؉HMv?aeU ``Eu$%62t 6mʊ+16NS۷o矻QD!֯-m895ҬY? 9yT/4iѣǸym &&&Vu$ܖ/7Ι["e˖9sx6셿HBdF {% $1Q#GGڵV,6̖۱cS,d%xyyb*,,-o.Ou$2#OX? 
s9ܖ#Yzӧ/aժURccc`"i3/^$4@ tjmlzM6cf~+W?\I 8y8"?'NTJٳggDEӣXe9SPHH8>݅~ʁ9{v3J}aeՁ:ں֪d${thڴŔ*U}&ۓ#Gttt‚k0iҤGg̘d3M>/߬:H!ˉ3Y>SSӤdBѢE8p {]ɒ%qw`<}:HF2#ܽe[?PF_.\swnE?soʕ+lc&K2n7oF__; G``0K8sŊjժ̛7x}3055ԩS͛>ٙ)S|Hzu)S&cKf/+W6YB P@wm۶q=N8E@.]efoQ-^%K& U"Eȕ+X[[3n8,X={}ΦMxg_Ki:::;+ʬ@"@:ϊ'yN |Ν;`ǎȘC__]IuAQQ,Y!C&gvp~{>*M6pzQD2B p:AA)r~ݺum۶O.\I&ԭ[ŋޣGXt)UTu֜>}A7o^lllx2/e G$g ? }NVRlGPxqX"Kjժ;v۷oDHH'O~צMڵkɔ):::d˖K&WbEttt4r9Zf˖B z:r ȟ4) ,`ժUݻlٲ}t;___իǟI6mRJȑ=z/_>۷/DDDIvW .dȑɚf͚ɓɟ??mڴ^JQsMκL24jjժp̙doddD"-RFǸv:tAnݚrqY{}g˗/c``@|ק|ܹsi׮ua֭'i&eZYYK{H!3gNaaaux5ܹsСCMhglzsWjZ5^NDDDЬY3<==^%MOOR@);vL~ (꿯%8)Z4JW3޽ _GLL I^zݻw111ի\z59z 2߹z@S|}}(U'@:Rf%._Bxxx~Uؗ/_xI ̙3fɓŋSreBGG̙3vZjժ8y$K,!(('''N:εŋ|IUshРGMBrРAIka|}})ZhFXX7fÆ ڢԩSɖ-'N$$$мys_]tx)r?n,,4; RHR3{eu9mzQuaCtcӦIS5kbiiIƍ)]4yԫW?Q.R3 TfJL2Yu3~3}#_\ .?g {Q TXw>})GC/K:]9u4*iéźuׯׯLsѣǔ/# ,k׮hW^QVMW/ܹ^)R'H*U*M?ҩSG""){ݣ~H|6m䭰ۇO3e qD2tWԨљz{8^eetsHzvСs;v\OLNʌ9rd˗TG$3HfgjWmƐ!|_LL mxyM" Zp< ^|lL8?.E@%@:Vlq6odE8::j!>(::.]:s9YBΜL*sf]oOB[.]RI+3tP\\fr [7RIh\XƝ9sf?:ƅиq#<](^\ӻٳm|*V,A5?UGR*88MKXփ~j: )?P]sv7năTGҘK.Q58v/*TkkYش7 FϞ=ݻ7aa1T\pJ~HBäԭ[#GVi +V`Æ #%D<<<^:r_rt30i 6m;aiY-["BBBի͚5z+<or0_vמ\'b/_b͚]fΜLɒ%hӦ-vvvXYYxÇٲe 7o"(1MԤG;ZnDLS4H_ngꝬ^۷SLilmyԮ]̙3+CÞ=ٻw/4kVΝm|B{H! իXf۶ƍ͛PV-֭Kҥɘ1y111\xcǎq9BXX8UutdC ̙+l~{O/Y`mmuuZ*y111\zӧOsN>ſ(; VEڴnCeݼyu0wJ2d@hh8zzz.]ҥPtiO\ȗ/FFF!CHLL$,,,!!!Ç~:׮]6qqq(Z*QNlm7I}'.rn'>>cc#/N%)Qs&wܘaffFl#sd͚'""'OHpp0ܸ/7o޽Ǔ={Vj׮UYճF23&>K , ޽ǹzuի; ^466$ܔ*U2eaiiAժT!11Fz̙ùq.no? 0 /suu3annJ)Y0ŋDѵ[Ʀ#VI! ŋRF'/Nǎ6W>/^?!&&m^B֯Cn9~|UX~pCŋMTT4:::0ٲcnnѩH._ҥf%)wy?+=!(ʕkMXtJ@ٲ4ɞaúظ"uK] q>N٣ 儇`Ԕ}wy1ӧ/&884EEFFA׮-?:)DzA f-ѱsLGE֬8;ME$fgB;>eB Ԫc޽DuΟF͚Yڍm#\_"ډgK +VlgϮW*U,ڵ%#KW|m]ʖmE6M3Iu$AAO)SFرUZJr6||n0k(Bh ߉y(ȝ;'G͋G#=oKK q ~~xzbܸ*Ct'٪-%boQE莅EA:ef֬Q]S|TZH Ev=cʮGO}!\]GjSZjHV BBB8BbAE~i(osz RⱲjGbٰa8uVVXxݺRGhIm;])i2\u/biiAm;vϟG#5ʍ-ФIMq Ϟ1u" ꂅEAqĉDG:"ROz̜9Bu! 
}UG*8;dܕܹ@u%뱷!45?~}='08_m-ZwQł⣆M\-UЄ-W!C쥼O`k;={~aCkqboиkXd]zЄmriRmФIMlm1bLUɌ >O])::`eU+R}T #88B!ËlvN1kVzHNs$0 Ӧ Q%YXX_:1y= SG(${y=h߾9jURGWW/FEyTI6cGW7Ӧ-VE($xoE`O:Zcܸeg$+ClLhϢEkvOu"IPSf\!̜9,$=(_$TGbAo}ww4[Ԯ]vNtt8"ɌH7]_#22 K˖4kVE&qR\kaĈ$3"5A!psŋL4HuQ@FɌK z:HARsQ3=\8BhXرɕTu3bDO3~("I!ιyyDz˗_:J20CYb+_SGY#='8a¯ ]u!6m=qR\bb"$11t@ tSMeVGyv\:2_V. :th:095N;vMqs%Eo,_kn:RUXҽc!22Jua2#%$$PzGw#=AҶt<} IDAT`8=lV ܕQGhC˗oUzi(7rΉS?<?U2: rqH!111TGo v:٣ɘQvӸq Ft%!!AuX0Ξ]OLUB8*WnKɒEذa8ZT9ӫWqD27G!^[x-0k(QZ20#&xy8"I!3|LZjHTB+<} <+ SG۟xM[:HfR޽G&C_? GU%U055f„_YoUH 4m3dH7zxʕ,]S-8F>?aaQQTGH qMeփ\CvxBѴi"""9~|TuTh ҍ)SY8;I!EEENǎ6X[WG32bDO5W'st샞^f&N:NRAo3"pv SScF:J={VMF]SG|)Ҙ`zg\᯿2,YTI3t J1l(;b4wq9r+WO 11u%^9NPϬ_?V#A&Dy^&Eo^srQҤ+СC FtYeVI|%H#v=zzٿ8BhHʖmM],:N2e~`̘88VG|%Y#FzTGBkyIkΨQqq`qWB dعن J#Vwg`̘#ƍ:JRf-G1z _;.dɢk;]UG|)R{t=BgشWבʚҶmSj׮̰a.$&B r;5?.\(;>;7t=BKFnR-]0WqgȌ@*lF_G:u!cP$}333a̘/qgH! `yK'J,:Z. aҤAgW'ݳBy=CuRB#])SƂ=ۨ"]Ld}9rNu R2o]wqfEƌ[66ui֬#F:Y,ʴj O9ujbcT ʕg#]*Wnܹc'qȑ$ٵ{G7o WE|@ɒE0&xBur4I%bcpttmۦԫg:Z!8830bD ʫ:g_>}$CH%[̘1Lu!\ 1(3a¯̟_߻!@*>; SGs?Cɖ@u{DpttWEY, 8ݻr6 FF=ȑ討#ghڴ[ΧE:7dF@`7ر ̞=ZT~jccf:xCf\ƽG7)[5Uk8+ݻ2e0CtSG 3Zm}=zzx}9!!aL:Du ǰa?3m"CUH!qtkזXYUG77/FK>7TId xeK[u!ӗ[ϦUHݺȒEoo/q5)PNO"/^biْ֭9NuPl+FŘ1TIdyxx8"֘9s)QQLh:ЀyqpìYx:N$d9eWGpC]`jj:АC3 cQ%]B@K.]cԨY):"4(K=f˪;F@ ܻrZ3c0#V8t ͚aE4iRSu6˗;J?4_߻\L2#rqqXYp|l:H!WܤZ,Y2nZnȩ:ömp"@7.[qs:HAʕO7n.R(.ԕO! 
gҤkg,, #RU..nH!FnݺϬY5N]DL;(B33Ə~{.H!Hhs&N']￷Yh & 08B;PP^F:J "S,$c #GQ\ zSE( wwGn=gTIPm3y`zxcǎwwG2d]Sz״iMll2l irԩ5R6TXիTZ֭T#߿8iRΝI<ygٳajjJ9Ȓ% %K0../^K={Ƴg!<0{*BPh~,-S| ʔR+ {qΝ׹v͏gB011TR)R?~ *D|077O:'EǏy<|rnܸAh&XZZP%UZbeUB&K!Qdde@Yh8" z>=zӧp-^$SL,Y%JRpa .L"E(Tȑ̙3!!!<}r޽?~~vϟ C +V+ԬYZ*StQL+ 7p)8ͩSx4===*T(OժV+WRJQLU}GPPׯ_͛\rsr111z4jdMF)YB$I鹊)3b"EFFqiv>ʁ}>TZ5jRBʕ+wݻwr W\̙?~gB066N4o^ ˗KI>%˗س(vS|jժI&MWUVE___uJLLƍ?~o^©P4u&E+?`׮#lڴ{@Æ hڴ7lٲivQbb"W\ax{fYc#lm={V1b:>>[ͤ:HbcػV`Ș1#-[e˖4oޜ9r8~8;wduܻw*Uҥ-۷\ߣ 3g嵑OӦMvlcԷ29m66mȾ}dʔ[ЧO[V-:ty5ɖ-󰱩:H^t.\C@4On?ӦMd[9z(Vbݺzm2eȘ)bbbYj;TT}ѹsg{T-<}!Cfp/33 %^{ &Mbɒ%T\ww˫%)GGwV;16.M|'d9:uj];5YjÇC_?3捣yybFGNݺ022LJ~Mddnnp&ԫ3NNI"ܼyyV1a"Yx{r СC̛7O/C׮]v:kקU_ppE\\}޷ޥ[ܹyӭ[o\|˗3x /ʕ./5d(ķKLLd֬??ޓnݺ`9 ֬YC߾}VJΜ_WP}oCgY3Yr%)RP=p"3SV9:H=ή]Gp"@|DFtc̙3?CdбcG?Ohذ'?M݉]h"2eK~XoVtuF#4*66ʕRt1֭PGrFblٲf͚<{ƍo_|#/.w8L. :]tuuYlq$;w:H-ZPErsd5lڴY 155{?3`g7ד}Q!p-ww ch۶-vvvhnnn5=p-ՑDʔ) 2 SGbN0vV\I-TI+Vdܸq)>nΜ9ǡ8:|ck'O2m4AGGbŊz/z???:ĦM`R1}t6mF۶CVG!&G_? }TGXddݺ9ѷo?ڷoq^f7nݺԯ_ƍd?ΤI46ȑC٭5,Y{vϮ4i>[̙$[ЏPBXZZCƌcbb֖;vh(vZ5+~!cSG/bm݁%K&ӭ[+qD*şk{/hӦ xzz;wn3gcƌٙ'jd|m8~'oHpw_) ,@BRps@qD0rUXҵkKQD*rL}sn݈aڵIE)>-FYPF 4hᾄ6CF 4 7?TG石;. 
5zJ`޽lݺɓ'9rdٝ!g앟@ll7K[ӱcGׯOiݺ5n}|ӧ_177e˖DFF#.]J*Uhݺ5OfРA͛^|Uۭ]L2ClXtiR+C5ys.lذ.%DTT4tZ|mѺF ʥKbllLݺ$v,Yc'^͛cggݻɓ'7{UVn1/\@&M[.ŋCq {m>ZY> M6 BBB]6vvv>|˗/Fڵy{_~hϟϺuرc˖- _|q\BDDYݻwp¯ڮCۗ>}qAݺuY|Bжm[> KMgPM:HBCsElll4:Ή'(Ug ##˗/122b޽ܹs۷o'5z/44۷o}v)^z 8#GcƍcCժU177c棅?ff9111HJLر#xzz&LeʔI8uTo2eШTV x8ݮ]vԩS[?ӦM <8EXMMMəӔ7i|,<͋\ IDAT#{78"?-q5r ;vˋ}('ERm4QGy^%;˗?]PPPҿ8rܹ -bE@#龏 ~JHHFcǎߥK~-[CΝ{{w᧟~J_m۶(QѣGt_3>}FSlLvΝHQDRP^2d;w4:N߾})U}6mJoǓu+ڷoǯ=zoJLL+cG zșӄ͛7k$jѢz",, z&L w 4nJ\\#Gb}{{+C 8::rwH 7n$gNԩ+""gի-*RG!&&ԬYk[nܻw̙C^˾佷dž8BQQDVlg  {&lLԩDŞ"R?\x9 {|>u5e zzzx{{ShQJ,I2eR.XL+/^$((֭sȖM.]Za4 ~'N0`vQ']`A?)VVV4k֌۷sԩ+F7ngΜa0a]W=oԩSk$DTZRF tttȞ=;ԭ[7M6Td^`eՁ}+Y]0… L2%ݎM~SѣTG,&˗/߆ŋ7ߨDDDPt)\\~w^._(^vvQrXpaN DEE1rw"@|27/Ĉi;4)2/##C2edbcewB_"2%HᝌN:XXXЬY3*T@󁜔D~}NxzŞlVv'oDcgg:Jr)fϞݿ<P`۷7eC|,OyL]FlL~fٲeb"qqq 4[q m6TIdᯘ1WFM%UȘ1x4N:陲VH=!!!4oތ7oػם?^|Ý;7gϞl۶zBcߧn:ٳ_BNTGȌ0a$"3rqСݰeѢEhk׮Qf bb"9p`|k?{Fjr޼Jk֮]?B|5kP51: $2;wl3fq.qD&=ӧm&>͛S6%JȑoYMʗ/k43IܼyK~׮]qF 4aúqZʖ-:`fSb:ttt;vwʞ=Uʐի={dȐLM?|zzٙ=ێe0+WfԨQ} bȑTVp[ˬY?^wN3g8fMGE&Ҽy._FٲhԨF"<<\u !))caQS{b~JLLd]LHE)=yggg166dQ?cm݉Jʳi8B|d}9WK#8psaú8\r EɢbXznnk oeT.sٟ… ,]S@^~tF___nEܸb #ߊdߘ?FF88c%׳$%%q!f}6m8:nOB ٛ7qlذE6qTfȐ ##>\K6l˹zՇJ9{[?'aTЖ?TG^Yu /!gNjGx+Vśx9M4Ooҥ 㥙$N8ٲŃzl?IҼHk=Ν㏓'ФIcZhIӦM v?8r(ٲҲe=tiN6 044PShԪՃ5kG%%>>?8{ط8Yd}{[ڵkG֭115U||<'Od߾}lݺGS{{֘I[!ׯ:}'>ӧɟ?M4N:ԬYkkkgϞREll,>>>?ӧOs!)Z kPUh&M봶˗l~={pybcP -[~ԬY3C\dĭ[8y$xy'"+Wm&]WTR|Νx{s={L5ܜ *?~q۷}67n…\Û7oȟ?/66iҤMP\ qE&}zTf:.::Gϳo1s#eFը]+Wʊ+r ]8u!!SnUڴO)T(|Q|Οŋ\t_'1,Y"EPH/NBȟ??yIHx9O<xo&,}y(CjQÒ5 mDtt,4jT+U"]<ٳW9q]DEEKre)[JD,YbŊ?~LLLz944/^=zu]n޼ID+dBɒEUˊ:u[*+ֈ:,Npp7oGܽiO>'00f˖ܹz6:䜝WMӦRE1nJ.=TGxR|! 
2 ܕIkqD&?c%!gٽ{tudj+Sb; 醣hqD&ԵܻK OnRTF7J,Ž TdD+3e_ÇOTLr*]ɖ/~8;U%Ð@ժ](W۶SGd]Oٹs8Bhp,-mׯ#NN?aȈW͊w{}NuIXpqWE踔lt8q(Nѣ\UiEGާFn :N"#Şpwߦ:rL!>JQu G TRL1'ӧJhKqҥ6ms#FؽۛCb/ }HEXX[V̙#\׶pBB9}z/UtjՊ_:N$gT;!ӧҥyOqٳA!p5k(DjI޼yػw8BKH#_`ii2eq2,i+RY,YpsǡCg-hFzԩ 12ʅQ24)@zUڵ%ƾQGdp/5k9vv(Z8BhK|Yvf 8L gXYu`ʔﰳ:Çr.9 $%%Ѹqd討Ɉ@)Z ?ԟYBuA%w=ߏR=냋@FPTT VVh֬6˖MSGd0III4izH|^m޼6KNSG+Ȉ@20#ɥK f/ΜGwWL6Ju!#iL:%d4I%_{5uwSq1\]8}*[x#2gtzH6n ȑTG*Rj,۷'#**Fu1g&LJyUB#>ɓdD@S^|H!>v֝puu`oTZ)X]Ú5 L4Lu!4 egபh5)(=6mcMHLMUB#}ګH!J*踔pqDʕۥw{s͛8\,!"""C&,\8YuZ̫W9uj,)KJڵNd:rs8+{_uFsK _,XOPPhDM9wqD*}UzxdzgXXn,$$K,̝;G/{8"͟N!>0y|LL:J%7ߴޕ7T'P|.^{='rS'Ӓ s,,3q0#RA9z'Dߒ={6^:N&#pi/T_)qv"@lڴo0g8Q2=PѱXZҨQ VtTG|'[6]ר#FҖVt4q2=Pz8;۱~.]UG|͛q :m5QQ̜) ipM'..j[XXGw<~UOŘ1TȈƛ;w<.c(3(zxs(V FVEE  W9wd9~:D涆afGu!4±cؾcɖMWuèP-ߛ:={ڵ;\SNxB @SY:xdffy4innk PG/}\\/W:"> #D\\<֝T<6#t=B|,<ٳHF2ltqqg?~Qu֬)]0yIߑ ]<ٳɚU8M77Y-M;wRjСUC>I27qܼy5kv">L$BhW*T(͠A]UG@ |  S. <8/w<_72uHs#Fؿ^F05L d@C}2{8qO<99 V킅E6ovSG 9ce@ƹ:u$mΝdz^^'ٷt=BcM<~O #TBB"5kvhxz.R'Jz*V,qaXXcĈL8?HAe͚77;Iq2Gh?}">XF5ԩ.ū/Ko)Q8Bh۬^G#>v<~%UGtM[>UGBcBNuJ(̏?~T4||njvzxGʧnnQG|"XP DFFaiiK۶ )d -Z &:: O &&J:R~5VtTG|̜Vm8Zo;vWW)^cTGIFDRR |~pWGk%w=Ueժ_TB#aii`Əkf2::::pv8:֚37^q(Bh7o~[QB@ԪU޽1~bbbU:Aa0 SGp56mڋ9r詎#L h!c2qPqJ9} ׮y ONI֫כ9 dJ2-SP>꺊 qFr3{t=B$[~/AudD@ %_VnUV ھt=B|,22 6fɪ㈯ #Z(G=f϶c9w8^rj:i1L:RudD@h1hNX/!zأGOT#ߏ[udD@s/Q%ÒGۻRx! 
Fu b*gРL<(q2G`zL3<=Ϯ]qsGlT SZ.88 v ,u|y^+ŋs8"Ȉ33Ô)1:=z:N!]swݻpq gdqqTڅ Jeq4^BB"5jtXWTЖ};0{8"Ɉ@&-.xzh}T8wy9r6L(!!g޽KUQN!>JŊ,ͤɄfœ98x4WG_ȓ'S+WN"ҘTҥc::2AA2ShAqWbOfAqDLL>1cS@RRM >>c晀dbEή?Pqի۷̝;^LBF2*VlGVXt8F!>U7LqD:L NN?f'WV'H#/_8ZudD@DÆ}ë1iԨ+W:#FU&N)Ȉ@GGssUv8:NKz~GQ&ͧ@3FVuΤTfAppp#**Fu4I ..+qpDyUB#>}8;ۡ]udj@ |E{~9޷xΟN9 $&&Rnosrq2" R(qⲒ'OUIuuvsWWQ"2" ʕ;Qk:j##CV#FҖN1Dq"2" ޣgg;<5jtcIuAdD@|gܿ[TGO1{{*W6o #$ecԨ^̜pq}/ȝPu!4>|WW @H}25 >Y }]`6m+" bc`mݙ-Xn8BəR|ܹ >}˖y{_uH#,XϳgAs-dD@|];}TI*U:St=B$ |-?ؗɓ#4Mdɒ{>ުX`=)S`d~:`RV^Uwos}:/=;-ZPu!4¥K['1PGh0_VV3:tHǎiҤIu%""sNGtt4[7dРδhQYY<Ë-[G"iժ5ZYfJ:ԖիW9t^^9yYi۶!ݻUra&!̛5k\#֬Ɉ3ظq#ݻwWG+̜9GǙxy^x&ƌqbɒ% 6LuLŋlقP[+KzgRh;wbcӓٳ9r8Zeȑݻ=ȓ'8;vVb  $5ڵQU i&eg߾ew H||Mm۶iZ$&&vZ5kF hԨ͚5c:uӧٱU;w.=z(ѫ=GN"_~ ttt(]47nLy$ޞkll.nS#>hwm̘7o'O ɽzΝ;cll… )Pv-y1qDL´iݺuʕ+sJj׶VG|`I>>jGU\sR!bE[F)M/ر#aaa9routt$>>^k !C-[:xǕ+_W\bŊrLڴi޽{xM߾;ed@ԀODD$ÇOcڵ3f:cǎLW7n<%_Q;M={[LGG']8: N:Rh OOoZn~ccc4hɑ#}IB T^'O}UooolmmСv ɉիӡC^ѣ155???9°a(Pڵ#**_bŊѠA}6oޟ_G -oﳴm6Msi222JF{3hРի9w/^|oѣ;wаaCFߙ7oܹst֍D-[Ǝ;ػw//ת|Kxz&111͏%Z"#_cii ] F__J*ܹCPPVVV+3fPH:w @]6OyNnݨ_>vJ9ر~!͆m}4իT4C?[.]53336m @58|V]UV%004?2{Y-7؇7o|k̙=*Gx˖-i޼9cƌa̘1w}bcc򂂂;L06mA^HHHˋ ~OS@ٵ[ ):ٲezzzTy-yH"5 ŋѭ[7J*S߰acǎe֬Y̛7iӦ1vX]6Gcnݚ*UDϞ=ٻw/ZJ0w ͞78XXBjJ]uRҗkU?ޕ|^z22]'>Z o޷AAA-Z4͎ӣG{]FJy)nnnL:K.annξ}{!K.eذa|chhɝ;7/^$$$SSӔܻwex„ tޝ+WpT?v-?p(s.IzK@@c̙Sq?Gz?@L\#  '!C`nn?c7sLlll077绹PJv ÇtޝϟS,.]P\9ƏŊ_ZXB+V҂v͛tԉGcǘ7o{q}޼yCpppʴ@tt4>%e!&& 储)S7Edɒ)S=z|-K,77owutZ9}4uʂQ\r4?V֭8p |q\\{ƾػ_R#G`m]_N -Ѻu==͏S\9^}r[֭\2-[{ܽ{WW.nrqq!<s,--֖ɓ'XjUmV:ujGgФ޽Kɒ%UQˋ .0eʔ4=΁رw@iz,\9{OFz?u!:tw7 O#if8%KVd򏻁̠m۶S`E֭[jJ搦'4%*cٲtMu^dT^Vj2}t9225ENo… :u`aaAfͨPB6lkװĈ2jHL2Bu dD@`fW= Aj2VTT۷PgJ3a;vN:ܖ*ޡChٲ<9DK -daQ{u(C_!,,}Xm$ԨRҥ 4oތOVZEvӧ=+WJH!jִs!;wnE愆ܻwZj=heYW*U9tHZ)**0dp$)2)XF59qbQ:gΜQ)غu+66055+WBu$ʖ-Ι3i֬Zb̘1DF7ĉT^={<ٽ{1S" B@˙~L8Q EXX}ok8|LT_Ѐ~ҥ?v*T0o? 
^xiذ!%KyZ:BRdysB.ĢE R///ձ4Jbb"V¢"ؽ{1M&G}S~~}Uk׮4n܈'Nz GGGʕ+ˁشOE2mI!I0xpW.^Jٲhݺ5[Gu4:D6l:4ʕl,V9r  мy4߰+{%gϦT:3jTO_EUG@ Ld"l:W?UTm6?~\utȎ;U&͛7'\\ &.i@:U8p܉ ~Tbʕ+ROcܸq#FPHa~e&CtL2CCD*2$..8q"666l8]vPu4Ć Xl)ֶ ^Ru4ХK,Y-[קWуufBBBضm6ĉfӞܹI! .YOd͚={ѻwoիW']v&Wњ)wxκuشiWޢXt ;wf͚Oٿ?[xpArȎm֖ƍkfb(B@',,M~\xyMiޖ:дi 3R Ϯ]xyyKƵt }}Pwᱟmr}LLмy ZjE&M(V_,::s>M9)Qfff˗y7o^r̙5맜%))p"##yAAAxLPP>"669 (CJT<+ں| %= ƍ{ܸq['w< "W1̟?  SLLL011!W\-[6Ȓ ] x{tt4aaaBpp0l5kV)Ei,,PBi*V|tH! RMbb"O>'<|Ǐ#((pBB^uT먔A_?G=ƹ144335̄RPۯ*=Taa??G/ }IX+BC_k^$!!׼Ly}z-sG_?&&F'OnLL055DB+Vb RP>eUI !"BLL !"B@!ĤB!21)BL3 _IENDB`Text-RecordParser-v1.6.3/bin/tab2graph000555000765000024 1137212201220566 17504 0ustar00kclarkstaff000000000000#!/usr/bin/env perl use strict; use warnings; use version; use English qw( -no_match_vars ); use File::Basename; use Getopt::Long; use GraphViz; use List::Util qw( max ); use Pod::Usage; use Readonly; use Scalar::Util qw( openhandle ); use Text::RecordParser; Readonly our $VERSION => 1.01; my $add_color = 0; my $fields = ''; my $fs = qq{\t}; my $is_directed = 0; my $layout = 'circo'; my $out_file = ''; my $out_format = 'png'; my $rs = qq{\n}; my $show_numbers = 0; my ( $help, $man_page, $show_version ); GetOptions( 'c|color' => \$add_color, 'd|directed' => \$is_directed, 'format:s' => \$out_format, 'fs:s' => \$fs, 'f|fields:s' => \$fields, 'help' => \$help, 'l|layout:s' => \$layout, 'man' => \$man_page, 'n|numbers' => \$show_numbers, 'o|out:s' => \$out_file, 'rs:s' => \$rs, 'version' => \$show_version, ) or pod2usage; if ( $help || $man_page ) { pod2usage({ -exitval => 0, -verbose => $man_page ? 
2 : 1 }); }; if ( $show_version ) { my $prog = basename( $PROGRAM_NAME ); print "$prog $VERSION\n"; exit 0; } if ( !@ARGV ) { pod2usage('No input files'); } elsif ( @ARGV > 1 ) { pod2usage('Too many input files'); } my %field_filter = map { $_, 1 } split( /\s*,\s*/, $fields ); my $file = shift @ARGV; my $p = Text::RecordParser->new( filename => $file, field_separator => $fs, record_separator => $rs, trim => 1, ); my $g = GraphViz->new( directed => $is_directed, layout => $layout, bgcolor => $add_color ? 'lightgoldenrodyellow' : 'white', no_overlap => 1, node => { style => 'filled', fillcolor => 'white', } ); my @cols = $p->field_list; my %col_pos; for my $i ( 1..$#cols ) { # skip first col my $col_name = $cols[ $i ]; if ( %field_filter ) { next unless $field_filter{ $col_name }; } $g->add_node( $col_name ); $col_pos{ $i } = $col_name; } my $records = $p->fetchall_arrayref; my @edges; for my $data ( @$records ) { my $node_name = $data->[0]; $g->add_node( $node_name ); for my $i ( 1..$#cols ) { my $val = $data->[ $i ] or next; my $col = $col_pos{ $i } or next; if ( %field_filter ) { next unless defined $field_filter{ $col }; } if ( $val =~ /^\d+$/ && $val > 0 ) { push @edges, [ $cols[ $i ], $node_name, $val ]; } } } my $max_val = max( map { $_->[-1] } @edges ); for my $edge ( @edges ) { my $val = $edge->[-1]; my $weight = ( $val / $max_val ) * .5; $g->add_edge( $edge->[0], $edge->[1], $show_numbers ? 
( label => $val ) : (), weight => $weight, ); } my $method = join '', 'as_', lc $out_format; if ( $out_file ) { open my $fh, '>', $out_file or die "Can't write '$out_file': $!\n"; binmode $fh; print $fh $g->$method; close $fh; my $basename = basename( $out_file ); print STDERR qq[Image created "$basename."\n]; } else { print $g->$method; } __END__ # ------------------------------------------------------------------- =pod =head1 NAME tab2graph - turn tabular data into a graph =head1 SYNOPSIS tab2graph [options] file.tab Options: -c|--color Add some color to the output (default is white) -d|--directed Make graph directed (default is not) -l|--layout GraphViz layout; choose from dot, neato, twopi, circo (default), and fdp -f|--fields Restrict to set of fields in first row -n|--numbers Show the numbers (default is not) -o|--out Name of output file (default is STDOUT) --format Output format (default is PNG) --fs=x Use "x" as the field separator (default is tab) --rs=x Use "x" as the record separator (default is newline) --help Show brief help and quit --man Show full documentation =head1 DESCRIPTION Turns tabular data into a graph using GraphViz. This may or may not be useful. =head1 SEE ALSO =over 4 =item * Text::RecordParser =item * GraphViz =back =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head1 LICENSE AND COPYRIGHT Copyright (C) 2009-10 Ken Youens-Clark. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
=cut Text-RecordParser-v1.6.3/bin/tablify000555000765000024 3130112201220566 17256 0ustar00kclarkstaff000000000000#!/usr/bin/env perl use strict; use warnings; use version; use File::Basename; use Getopt::Long; use List::Util 'max'; use Pod::Usage; use Readonly; use Text::Autoformat 'autoformat'; use Text::RecordParser; use Text::TabularDisplay; Readonly our $VERSION => 1.15; Readonly my $DASH => q{-}; Readonly my $EMPTY_STR => q{}; Readonly my $TAB => qq{\t}; Readonly my $NEWLINE => qq{\n}; Readonly my $WIDTH => 78; my $comment_start = ''; my $fs = $TAB; my $rs = $NEWLINE; my $headers = ''; my $no_headers = 0; my $show_vertically = 0; my $no_pager = 0; my $strip_quotes = 0; my ( $show_fields, $list, $limit, @where, $help, $man_page, $show_version ); GetOptions( 'c|comment:s' => \$comment_start, 'fs:s' => \$fs, 'rs:s' => \$rs, 'f|fields:s' => \$show_fields, 'l|list' => \$list, 'i|limit:i' => \$limit, 'w|where:s' => \@where, 'v|vertical' => \$show_vertically, 'strip-quotes' => \$strip_quotes, 'n|no-headers' => \$no_headers, 'h|headers:s' => \$headers, 'no-pager' => \$no_pager, 'help' => \$help, 'man' => \$man_page, 'version' => \$show_version, ) or pod2usage; if ( $help || $man_page ) { pod2usage({ -exitval => 0, -verbose => $man_page ? 2 : 1 }); }; if ( $show_version ) { my $prog = basename( $0 ); print "$prog $VERSION\n"; exit 0; } my $file = shift or pod2usage('No file'); my $p = Text::RecordParser->new( field_separator => $fs, record_separator => $rs, comment => $comment_start ? 
qr/^$comment_start/ : undef, ); if ( $strip_quotes ) { $p->field_filter( sub { s/^["']//; s/["']$//; $_ } ); } if ( $file eq $DASH ) { $p->fh( \*STDIN ); } else { $p->filename( $file ); } my @fields; if ( @fields = split( /\s*,\s*/, $headers ) ) { $p->bind_fields( @fields ); } elsif ( !$no_headers ) { $p->bind_header; @fields = $p->field_list; } if ( $list ) { pod2usage(q[Can't list fields with --no-headers]) if $no_headers; my $tab = Text::TabularDisplay->new('Field No.', 'Field'); my $i = 1; $tab->add( $i++, $_ ) for @fields; print $tab->render, $NEWLINE; exit 0; } my %where; for ( @where ) { if ( /([\w\d]+)\s*(==|eq|>=?|<=?|=~)\s*(.*)/ ) { my $field = $1; my $op = $2; my $value = $3; unless ( $no_headers ) { my %available = map { $_, 1 } @fields; next unless $available{ $field }; } $field-- if $field =~ /^\d+$/; $where{ $field } = [ $op, $value ]; } } if ( $show_fields ) { my @show = map { $_ =~ m/^\d+$/ && @fields ? $_-1 < scalar @fields ? $_ : () : $_ } map { $_ =~ m/(\d+)-(\d+)/ ? ( $1..$2 ) : $_ } split /,/, $show_fields; my @numbers = grep { /^(\d+)$/ } @show; if ( $no_headers ) { @fields = @show; } else { if ( scalar @show == scalar @numbers ) { # all numbers @numbers = map { $_ - 1 } @numbers; @fields = @fields[ @numbers ]; } elsif ( @show ) { my %available = map { $_, 1 } @fields; my @temp = @fields; @fields = map { $available{ $_ } ? $_ : () } @show; } if ( !@fields ) { die "No fields match in list '$show_fields'\n"; } } } my $fh; my $pager = $ENV{'PAGER'}; if ( !$no_pager && $pager ) { open $fh, "| $pager"; } else { $fh = \*STDOUT; } my $tab = Text::TabularDisplay->new( @fields ); my $max_col_length = 0; my $num_records = 0; my $separator = "************ Record %s ************\n"; if ( $no_headers ) { my @field_names; RECORD: while ( my @data = $p->fetchrow_array ) { if ( !@fields ) { @fields = ( 0..$#data ); @field_names = map { 'Field' . 
($_+1) } @fields; $max_col_length = max( map { length $_ } @field_names ); $tab->columns( @field_names ); } for my $field ( keys %where ) { my ( $op, $value ) = @{ $where{ $field } }; my $cmd = "'$data[ ($field - 1) ]' $op $value"; next RECORD unless eval $cmd; } $num_records++; if ( $show_vertically ) { printf $fh $separator, $num_records; for my $i ( @fields ) { printf $fh "%${max_col_length}s: %s\n", $field_names[ $i ], defined $data[ $i ] ? $data[ $i ] : q{}; } } else { $tab->add( map { $data[ $_ ] } @fields ); } last if $limit && $num_records >= $limit; } } else { $max_col_length = max map { $_ ? length $_ : 0 } $p->field_list; RECORD: while ( my $data = $p->fetchrow_hashref ) { for my $field ( keys %where ) { my ( $op, $value ) = @{ $where{ $field } }; my $cmd = "'$data->{ $field }' $op $value"; next RECORD unless eval $cmd; } $num_records++; if ( $show_vertically ) { printf $fh $separator, $num_records; for my $field ( @fields ) { next unless $field; my $v = defined $data->{ $field } ? $data->{ $field } : q{}; if ( length $v > $WIDTH ) { ( $v = autoformat( $v, { left => $max_col_length + 3 } ) ) =~ s/^\s+|\s+$//g; } printf $fh "%${max_col_length}s: %s\n", $field, $v; } } else { $tab->add( map { $data->{ $_ } } @fields ); } last if $limit && $num_records >= $limit; } } if ( !$show_vertically ) { print $fh $tab->render; } print $fh $num_records ? sprintf( "\n%s record%s returned\n", $num_records, $num_records > 1 ? 
's' : $EMPTY_STR ) : "\nNo records returned\n"; close $fh; __END__ # ------------------------------------------------------------------- # $Id: tablify,v 1.14 2006/03/07 17:20:00 kclark Exp $ =pod =head1 NAME tablify - turn a delimited text file into a text table =head1 SYNOPSIS tablify [options] file Options: -h|--help Show help -c|--comment Define the beginning of a (single-line) comment -n|--no-headers Assume first line is data, not headers --no-pager Do not use $ENV{'PAGER'} even if defined --strip-quotes Strip " or ' around fields -l|--list List the fields in the file (for use with -f) -f|--fields=f1[,f2] Show only fields in comma-separated list; when used in conjunction with "no-headers" the list should be field numbers (starting at 1); otherwise, should be field names -w|where=fv Apply the "cmp" Perl operator to restrict output where field "f" matches the value "v"; acceptable operators include ==, eq, >, >=, <=, and =~ -v|--vertical Show records vertically -i|--limit=n Limit to given number of records --fs=x Use "x" as the field separator (default is tab "\t") --rs=x Use "x" as the record separator (default is newline "\n") --as-html Create an HTML table instead of plain text --headers Comma-separated list of names matching the number of columns =head1 DESCRIPTION This script is essentially a quick way to parse a delimited text file and view it as a nice ASCII table. By selecting only certain B, employing a B clause to only select records where a field matches some value, and using the B to only see some of the output, you almost have a mini-database front-end for a simple text file. =head1 EXAMPLES Given a data file like this: name,rank,serial_no,is_living,age George,General,190293,0,64 Dwight,General,908348,0,75 Attila,Hun,,0,56 Tojo,Emporor,,0,87 Tommy,General,998110,1,54 To find the fields you can reference, use the B option: $ tablify --fs ',' -l people.dat +-----------+-----------+ | Field No. 
| Field | +-----------+-----------+ | 1 | name | | 2 | rank | | 3 | serial_no | | 4 | is_living | | 5 | age | +-----------+-----------+ To extract just the name and serial numbers, use the B option: $ tablify --fs ',' -f name,serial_no people.dat +--------+-----------+ | name | serial_no | +--------+-----------+ | George | 190293 | | Dwight | 908348 | | Attila | | | Tojo | | | Tommy | 998110 | +--------+-----------+ 5 records returned To extract the first through third fields and the fifth field (where field numbers start at "1" -- tip: use the B option to quickly determine field numbers), use this syntax for B: $ tablify --fs ',' -f 1-3,5 people.dat +--------+---------+-----------+------+ | name | rank | serial_no | age | +--------+---------+-----------+------+ | George | General | 190293 | 64 | | Dwight | General | 908348 | 75 | | Attila | Hun | | 56 | | Tojo | Emporor | | 87 | | Tommy | General | 998110 | 54 | +--------+---------+-----------+------+ 5 records returned To select only the ones with six serial numbers, use a B clause: $ tablify --fs ',' -w 'serial_no=~/^\d{6}$/' people.dat +--------+---------+-----------+-----------+------+ | name | rank | serial_no | is_living | age | +--------+---------+-----------+-----------+------+ | George | General | 190293 | 0 | 64 | | Dwight | General | 908348 | 0 | 75 | | Tommy | General | 998110 | 1 | 54 | +--------+---------+-----------+-----------+------+ 3 records returned To find Dwight's record, you would do this: $ tablify --fs ',' -w 'name eq "Dwight"' people.dat +--------+---------+-----------+-----------+------+ | name | rank | serial_no | is_living | age | +--------+---------+-----------+-----------+------+ | Dwight | General | 908348 | 0 | 75 | +--------+---------+-----------+-----------+------+ 1 record returned To find the name of all the people with a serial number who are living: $ tablify --fs ',' -f name -w 'is_living==1' -w 'serial_no>0' people.dat +-------+ | name | +-------+ | Tommy | +-------+ 1 
record returned To filter outside of program and simply format the results, use "-" as the last argument to force reading of STDIN (and probably assume no headers): $ grep General people.dat | tablify --fs ',' -f 1-3 --no-headers - +---------+--------+--------+ | Field1 | Field2 | Field3 | +---------+--------+--------+ | General | 190293 | 0 | | General | 908348 | 0 | | General | 998110 | 1 | +---------+--------+--------+ 3 records returned When dealing with data lacking field names, you can specify "no-headers" and then refer to fields by number (starting at one), e.g.: $ tail -5 people.dat | tablify --fs ',' --no-headers -w '3 eq "General"' - +--------+---------+--------+--------+--------+ | Field1 | Field2 | Field3 | Field4 | Field5 | +--------+---------+--------+--------+--------+ | George | General | 190293 | 0 | 64 | | Dwight | General | 908348 | 0 | 75 | | Tommy | General | 998110 | 1 | 54 | +--------+---------+--------+--------+--------+ 3 records returned If your file has many fields which are hard to see across the screen, consider using the vertical display with "-v" or "--vertical", e.g.: $ tablify --fs ',' -v --limit 1 people.dat ************ Record 1 ************ name: George rank: General serial_no: 190293 is_living: 0 age : 64 1 record returned =head1 SEE ALSO =over 4 =item * Text::RecordParser =item * Text::TabularDisplay =item * DBD::CSV Although I don't DBD::CSV this module, the idea was much the inspiration for this. I just didn't want to have to install DBI and DBD::CSV to get this kind of functionality. I think my interface is simpler. =back =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head1 LICENSE AND COPYRIGHT Copyright (C) 2006-10 Ken Youens-Clark. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. =cut Text-RecordParser-v1.6.3/bin/tabmerge000555000765000024 2355712201220566 17430 0ustar00kclarkstaff000000000000#!/usr/bin/env perl use strict; use warnings; use version; use English qw( -no_match_vars ); use File::Basename; use Getopt::Long; use Pod::Usage; use Readonly; use Text::RecordParser; use Text::TabularDisplay; Readonly our $VERSION => 1.12; Readonly my $COMMA => q{,}; Readonly my $COMMA_SPACE => q{, }; Readonly my $DASH => q{-}; Readonly my $EMPTY_STR => q{}; Readonly my $TAB => qq{\t}; Readonly my $NEWLINE => qq{\n}; my $fs = $TAB; my $rs = $NEWLINE; my ( $fields, $min, $max, $list, $sort, $stdout, $help, $man_page, $show_version ); GetOptions( 'f|fields:s' => \$fields, 'help' => \$help, 'fs:s' => \$fs, 'l|list' => \$list, 'man' => \$man_page, 'min' => \$min, 'max' => \$max, 'rs:s' => \$rs, 's|sort:s' => \$sort, 'stdout' => \$stdout, 'version' => \$show_version, ) or pod2usage; if ( $help || $man_page ) { pod2usage({ -exitval => 0, -verbose => $man_page ? 2 : 1 }); }; if ( $show_version ) { my $prog = basename( $PROGRAM_NAME ); print "$prog $VERSION\n"; exit 0; } if ( scalar @ARGV < 2 ) { pod2usage('Please supply two or more files'); } my @files = @ARGV or pod2usage('No input files'); $min = 1 unless $max or $fields; my $p = Text::RecordParser->new( field_separator => $fs, record_separator => $rs, ); if ( $list ) { list( $p, @files ); } elsif ( $min || $max || $fields ) { intersection( fields => $fields, files => \@files, parser => $p, sort => $sort, type => $min ? 
'min' : 'max', ); } else { pod2usage(q[Can't figure out what to do.]); } # ------------------------------------------------------------------- sub intersection { my %args = @_; my $p = $args{'parser'}; my $type = $args{'type'}; my $files = $args{'files'}; my $fields = $args{'fields'} || $EMPTY_STR; # optional my $sort = $args{'sort'} || $EMPTY_STR; # optional my %fields; for my $file ( @$files ) { $p->filename( $file ); $p->bind_header; $fields{ $_ }++ for $p->field_list; } my @intersect; if ( $fields ) { @intersect = map { s/^\s+|\s+$//g; $_ } split /$COMMA/, $fields; die_if_bad( \%fields, \@intersect ); } else { my $no_files = scalar @$files; for my $fld ( keys %fields ) { push @intersect, $fld if ( $type eq 'max' ) || ( $type eq 'min' && $fields{ $fld } == $no_files ) ; } @intersect = sort @intersect; } die "No intersection!\n" unless @intersect; my @data; for my $file ( @$files ) { $p->filename( $file ); $p->bind_header; while ( my $rec = $p->fetchrow_hashref ) { push @data, { map { $_, $rec->{ $_ } } @intersect }; } } if ( my @sort = map { s/^\s+|\s+$//g; $_ } split /$COMMA/, $sort ) { die_if_bad( \%fields, \@sort ); @data = map { $_->[1] } sort { $a->[0] cmp $b->[0] } map { [ join($DASH, @{ $_ }{ @sort } ), $_ ] } @data; } if ( $stdout ) { print join( $fs, @intersect ), $rs; for my $rec ( @data ) { print join( $fs, map { defined $rec->{$_} ? $rec->{$_} : $EMPTY_STR } @intersect ), $rs; } } else { my $tab = Text::TabularDisplay->new( @intersect ); for my $rec ( @data ) { $tab->add( map { $rec->{ $_ } } @intersect ); } print $tab->render, $NEWLINE; } } # ------------------------------------------------------------------- sub list { my ( $p, @files ) = @_; my %fields; for my $file ( @files ) { $p->filename( $file ); $p->bind_header; $fields{ $_ }++ for $p->field_list; } my $tab = Text::TabularDisplay->new('Field', 'No. 
Times Present'); $tab->add( $_, $fields{ $_ } ) for sort keys %fields; print $tab->render, $NEWLINE; } # ------------------------------------------------------------------- sub die_if_bad { my ( $fields, $check ) = @_; my @bad; for my $fld ( @$check ) { push @bad, $fld unless $fields->{ $fld }; } if ( @bad ) { die sprintf "Bad field name%s: %s$NEWLINE", scalar @bad > 1 ? 's' : $EMPTY_STR, join $COMMA_SPACE, @bad ; } else { return 1; } } __END__ # ------------------------------------------------------------------- # $Id: tabmerge,v 1.11 2006/03/07 17:20:00 kclark Exp $ =pod =head1 NAME tabmerge - unify delimited files on common fields =head1 SYNOPSIS tabmerge [action] [options] file1 file2 [...] Actions: --min Take only fields present in all files [DEFAULT] --max Take all fields present -f|--fields=f1[,f2] Take only the fields mentioned in the comma-separated list Options: -l|--list List available fields --fs=x Use "x" as the field separator (default is tab "\t") --rs=x Use "x" as the record separator (default is newline "\n") -s|--sort=f1[,f2] Sort data ASCII-betically on field(s) --stdout Print data in original delimited format (i.e., not in a table format) --help Show brief help and quit --man Show full documentation =head1 DESCRIPTION This program merges the fields -- not the rows -- of delimited text files. That is, if several files are almost but not quite entirely unlike each other in their structure (in their field names, numbers or orders), this script allows you to easily unify the files into one file with all the same fields. The output can be based on fields as determined by the three "action" flags. 
For the following examples, consider three files that contain the following fields: +------------+---------------------------------+ | File | Fields | +------------+---------------------------------+ | merge1.tab | name, type, position | | merge2.tab | name, type, position, lod_score | | merge3.tab | name, position | +------------+---------------------------------+ To list all available fields in the files and the number of times they are present: $ tabmerge --list merge* +-----------+-------------------+ | Field | No. Times Present | +-----------+-------------------+ | lod_score | 1 | | name | 3 | | position | 3 | | type | 2 | +-----------+-------------------+ To merge the files on the minimum overlapping fields: $ tabmerge merge* +----------+----------+ | name | position | +----------+----------+ | RM104 | 2.30 | | RM105 | 4.5 | | TX5509 | 10.4 | | UU189 | 19.0 | | Xpsm122 | 3.3 | | Xpsr9556 | 4.5 | | DRTL | 2.30 | | ALTX | 4.5 | | DWRF | 10.4 | +----------+----------+ To merge the files and include all the fields: $ tabmerge --max merge* +-----------+----------+----------+--------+ | lod_score | name | position | type | +-----------+----------+----------+--------+ | | RM104 | 2.30 | RFLP | | | RM105 | 4.5 | RFLP | | | TX5509 | 10.4 | AFLP | | 2.4 | UU189 | 19.0 | SSR | | 1.2 | Xpsm122 | 3.3 | Marker | | 1.2 | Xpsr9556 | 4.5 | Marker | | | DRTL | 2.30 | | | | ALTX | 4.5 | | | | DWRF | 10.4 | | +-----------+----------+----------+--------+ To merge and extract just the "name" and "type" fields: $ tabmerge -f name,type merge* +----------+--------+ | name | type | +----------+--------+ | RM104 | RFLP | | RM105 | RFLP | | TX5509 | AFLP | | UU189 | SSR | | Xpsm122 | Marker | | Xpsr9556 | Marker | | DRTL | | | ALTX | | | DWRF | | +----------+--------+ To merge the files on just the "name" and "lod_score" fields and sort on the name: $ tabmerge -f name,lod_score -s name merge* +----------+-----------+ | name | lod_score | +----------+-----------+ | ALTX | | | DRTL | | | 
DWRF | | | RM104 | | | RM105 | | | TX5509 | | | UU189 | 2.4 | | Xpsm122 | 1.2 | | Xpsr9556 | 1.2 | +----------+-----------+ To do the same but mimic the original tab-delimited input: $ tabmerge -f name,lod_score -s name --stdout merge* name lod_score ALTX DRTL DWRF RM104 RM105 TX5509 UU189 2.4 Xpsm122 1.2 Xpsr9556 1.2 Why would you want to do this? Suppose you have several delimited text files with nearly the same structure and want to create just one file from them, but the fields may be in a different order in each file and/or some files may contain more or fewer fields than others. (As far-fetched as it may seem, it happens to the author more than he'd like.) =head1 SEE ALSO =over 4 =item * Text::RecordParser =item * Text::TabularDisplay =back =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head1 LICENSE AND COPYRIGHT Copyright (C) 2006-10 Ken Youens-Clark. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
=cut Text-RecordParser-v1.6.3/lib000755000765000024 012201220566 15521 5ustar00kclarkstaff000000000000Text-RecordParser-v1.6.3/lib/Text000755000765000024 012201220566 16445 5ustar00kclarkstaff000000000000Text-RecordParser-v1.6.3/lib/Text/RecordParser.pm000444000765000024 6600612201220566 21563 0ustar00kclarkstaff000000000000package Text::RecordParser; =head1 NAME Text::RecordParser - read record-oriented files =head1 SYNOPSIS use Text::RecordParser; # use default record (\n) and field (,) separators my $p = Text::RecordParser->new( $file ); # or be explicit my $p = Text::RecordParser->new({ filename => $file, field_separator => "\t", }); $p->filename('foo.csv'); # Split records on two newlines $p->record_separator("\n\n"); # Split fields on tabs $p->field_separator("\t"); # Skip lines beginning with hashes $p->comment( qr/^#/ ); # Trim whitespace $p->trim(1); # Use the fields in the first line as column names $p->bind_header; # Get a list of the header fields (in order) my @columns = $p->field_list; # Extract a particular field from the next row my ( $name, $age ) = $p->extract( qw[name age] ); # Return all the fields from the next row my @fields = $p->fetchrow_array; # Define a field alias $p->set_field_alias( name => 'handle' ); # Return all the fields from the next row as a hashref my $record = $p->fetchrow_hashref; print $record->{'name'}; # or print $record->{'handle'}; # Return the record as an object with fields as accessors my $object = $p->fetchrow_object; print $object->name; # or $object->handle; # Get all data as arrayref of arrayrefs my $data = $p->fetchall_arrayref; # Get all data as arrayref of hashrefs my $data = $p->fetchall_arrayref( { Columns => {} } ); # Get all data as hashref of hashrefs my $data = $p->fetchall_hashref('name'); =head1 DESCRIPTION This module is for reading record-oriented data in a delimited text file. 
The most common example have records separated by newlines and fields separated by commas or tabs, but this module aims to provide a consistent interface for handling sequential records in a file however they may be delimited. Typically this data lists the fields in the first line of the file, in which case you should call C to bind the field name (or not, and it will be called implicitly). If the first line contains data, you can still bind your own field names via C. Either way, you can then use many methods to get at the data as arrays or hashes. =head1 METHODS =cut use strict; use warnings; use version; use Carp qw( croak ); use IO::Scalar; use List::MoreUtils qw( uniq ); use Readonly; use Text::ParseWords qw( parse_line ); our $VERSION = version->new('1.6.3'); Readonly my $COMMA => q{,}; Readonly my $EMPTY_STR => q{}; Readonly my $NEW_LINE => qq{\n}; Readonly my $PIPE => q{|}; # ---------------------------------------------------------------- sub new { =pod =head2 new This is the object constructor. It takes a hash (or hashref) of arguments. Each argument can also be set through the method of the same name. =over 4 =item * filename The path to the file being read. If the filename is passed and the fh is not, then it will open a filehandle on that file and sets C accordingly. =item * comment A compiled regular expression identifying comment lines that should be skipped. =item * data The data to read. =item * fh The filehandle of the file to read. =item * field_separator | fs The field separator (default is comma). =item * record_separator | rs The record separator (default is newline). =item * field_filter A callback applied to all the fields as they are read. =item * header_filter A callback applied to the column names. =item * trim Boolean to enable trimming of leading and trailing whitespace from fields (useful if splitting on whitespace only). =back See methods for each argument name for more information. 
Alternately, if you supply a single argument to C, it will be treated as the C argument. =cut my $class = shift; my $args = defined $_[0] && UNIVERSAL::isa( $_[0], 'HASH' ) ? shift : scalar @_ == 1 ? { filename => shift } : { @_ }; my $self = bless {}, $class; if ( my $fs = $args->{'fs'} ) { $args->{'field_separator'} = $fs; delete $args->{'fs'}; } if ( my $rs = $args->{'rs'} ) { $args->{'record_separator'} = $rs; delete $args->{'rs'}; } my $data_handles = 0; for my $arg ( qw[ filename fh header_filter field_filter trim field_separator record_separator data comment ] ) { next if !defined $args->{ $arg }; if ( $arg =~ / \A (filename|fh|data) \Z /xms ) { $data_handles++; } $self->$arg( $args->{ $arg } ); } if ( $data_handles > 1 ) { croak 'Passed too many arguments to read the data. '. 'Please choose only one of "filename," "fh," or "data."' ; } return $self; } # ---------------------------------------------------------------- sub bind_fields { =pod =head2 bind_fields $p->bind_fields( qw[ name rank serial_number ] ); Takes an array of field names and memorizes the field positions for later use. If the input file has no header line but you still wish to retrieve the fields by name (or even if you want to call C and then give your own field names), simply pass in the an array of field names you wish to use. Pass in an empty array reference to unset: $p->bind_field( [] ); # unsets fields =cut my $self = shift; # using an empty arrayref to unset if ( ref $_[0] eq 'ARRAY' && !@{ $_[0] } ) { $self->{'field_pos_ordered'} = []; $self->{'field_pos'} = {}; $self->{'fields_bound'} = 0; } elsif ( @_ ) { my @fields = @_; $self->{'field_pos_ordered'} = [ @fields ]; my %field_pos; for my $i ( 0 .. 
$#fields ) { next unless $fields[ $i ]; $field_pos{ $fields[ $i ] } = $i; } $self->{'field_pos'} = \%field_pos; $self->{'fields_bound'} = 1; } else { croak 'Bind fields called without field list'; } return 1; } # ---------------------------------------------------------------- sub bind_header { =pod =head2 bind_header $p->bind_header; my $name = $p->extract('name'); Takes the fields from the next row under the cursor and assigns the field names to the values. Usually you would call this immediately after opening the file in order to bind the field names in the first row. =cut my $self = shift; if ( my @columns = $self->fetchrow_array ) { if ( my $filter = $self->header_filter ) { for my $i ( 0 .. $#columns ) { $columns[ $i ] = $filter->( $columns[ $i ] ); } } $self->bind_fields( @columns ); } else { croak q[Can't find columns in file '], $self->filename, q[']; } return 1; } # ---------------------------------------------------------------- sub comment { =pod =head2 comment $p->comment( qr/^#/ ); # Perl-style comments $p->comment( qr/^--/ ); # SQL-style comments Takes a regex to apply to a record to see if it looks like a comment to skip. =cut my $self = shift; if ( my $arg = shift ) { if ( ref $arg ne 'Regexp' ) { croak q[Argument to comment doesn't look like a regex]; } $self->{'comment'} = $arg; } return defined $self->{'comment'} ? $self->{'comment'} : $EMPTY_STR; } # ---------------------------------------------------------------- sub data { =pod =head2 data $p->data( $string ); $p->data( \$string ); $p->data( @lines ); $p->data( [$line1, $line2, $line3] ); $p->data( IO::File->new(' as it will read the entire contents of the file rather than one line at a time if you set it via C. 
=cut

    my $self = shift;
    my $data;

    if ( @_ ) {
        my $arg = shift;

        if ( UNIVERSAL::isa( $arg, 'SCALAR' ) ) {
            $data = $$arg;
        }
        elsif ( UNIVERSAL::isa( $arg, 'ARRAY' ) ) {
            $data = join $EMPTY_STR, @$arg;
        }
        elsif ( UNIVERSAL::isa( $arg, 'GLOB' ) ) {
            local $/;
            $data = <$arg>;
        }
        elsif ( !ref($arg) && @_ ) {
            $data = join $EMPTY_STR, $arg, @_;
        }
        else {
            $data = $arg;
        }
    }
    else {
        croak 'Data called without any arguments';
    }

    if ( $data ) {
        # Wrap the in-memory data in a filehandle so all of the
        # fetch* methods can read it exactly like a real file.
        my $fh = IO::Scalar->new( \$data );
        $self->fh( $fh );
    }
    else {
        croak 'No usable data';
    }

    return 1;
}

# ----------------------------------------------------------------
sub extract {

=pod

=head2 extract

  my ( $foo, $bar, $baz ) = $p->extract( qw[ foo bar baz ] );

Extracts a list of fields out of the last row read.  The field names
must correspond to the field names bound either via C<bind_fields> or
C<bind_header>.

=cut

    my $self    = shift;
    my @fields  = @_ or return;
    my %allowed = map { $_, 1 } $self->field_list;
    my $record  = $self->fetchrow_hashref or return;

    my @data;
    foreach my $field ( @fields ) {
        if ( $allowed{ $field } ) {
            push @data, $record->{ $field };
        }
        else {
            croak "Invalid field $field for file "
                . $self->filename
                . $NEW_LINE
                . 'Valid fields are: '
                . join( ', ', $self->field_list )
                . $NEW_LINE;
        }
    }

    # A single requested field comes back as a scalar, not a 1-list.
    return scalar @data == 1 ? $data[0] : @data;
}

# ----------------------------------------------------------------
sub fetchrow_array {

=pod

=head2 fetchrow_array

  my @values = $p->fetchrow_array;

Reads a row from the file and returns an array or array reference
of the fields.

=cut

    my $self    = shift;
    my $fh      = $self->fh or croak 'No filename or file handle';
    my $comment = $self->comment;
    local $/    = $self->record_separator;

    my $line;
    my $line_no = 0;
    for ( ;; ) {
        $line_no++;
        defined( $line = <$fh> ) or return;
        chomp $line;
        next if $comment and $line =~ $comment;

        # parse_line requires single quotes to be escaped, so
        # backslash-escape any quote not already escaped.
        $line =~ s/(?<!\\)'/\\'/g;
        last if $line;
    }

    my $separator = $self->field_separator;
    $separator eq $PIPE and $separator = '\|'; # literal pipe must be escaped

    # Un-escape the single quotes we escaped above once parsed.
    my @fields = map { defined $_ && $_ =~ s/\\'/'/g; $_ } (
        ( ref $separator eq 'Regexp' )
            ? parse_line( $separator, 0, $line )
            : parse_line( $separator, 1, $line )
    );

    if ( !@fields ) {
        croak "Error reading line number $line_no:\n$line";
    }

    if ( my $filter = $self->field_filter ) {
        @fields = map { $filter->( $_ ) } @fields;
    }

    if ( $self->trim ) {
        @fields = map { defined $_ && s/^\s+|\s+$//g; $_ } @fields;
    }

    # Positional (numeric) computes only; named computes are applied
    # in fetchrow_hashref where fields are keyed by name.
    while ( my ( $position, $callback ) = each %{ $self->field_compute } ) {
        next if $position !~ m/^\d+$/;
        $fields[ $position ] = $callback->( $fields[ $position ], \@fields );
    }

    return wantarray ? @fields : \@fields;
}

# ----------------------------------------------------------------
sub fetchrow_hashref {

=pod

=head2 fetchrow_hashref

  my $record = $p->fetchrow_hashref;
  print "Name = ", $record->{'name'}, "\n";

Reads a line of the file and returns it as a hash reference.  The
keys of the hashref are the field names bound via C<bind_fields> or
C<bind_header>.  If you do not bind fields prior to calling this
method, the C<bind_header> method will be implicitly called for you.

=cut

    my $self   = shift;
    my @fields = $self->field_list     or return;
    my @row    = $self->fetchrow_array or return;

    my $i = 0;
    my %return;
    for my $field ( @fields ) {
        # NOTE: $i advances only when a defined value is stored.
        next unless defined $row[ $i ];
        $return{ $field } = $row[ $i++ ];
        if ( my @aliases = $self->get_field_aliases( $field ) ) {
            $return{ $_ } = $return{ $field } for @aliases;
        }
    }

    while ( my ( $position, $callback ) = each %{ $self->field_compute } ) {
        $return{ $position } = $callback->( $return{ $position }, \%return );
    }

    return \%return;
}

# ----------------------------------------------------------------
sub fetchrow_object {

=pod

=head2 fetchrow_object

  while ( my $object = $p->fetchrow_object ) {
      my $id   = $object->id;
      my $name = $object->naem; # <-- this will throw a runtime error
  }

This will return the next data record as a Text::RecordParser::Object
object that has read-only accessor methods of the field names and any
aliases.  This allows you to enforce field names, further helping
ensure that your code is reading the input file correctly.  That is,
if you are using the "fetchrow_hashref" method to read each line, you
may misspell the hash key and introduce a bug in your code.  With this
method, Perl will throw an error if you attempt to read a field not
defined in the file's headers.  Additionally, any defined field
aliases will be created as additional accessor methods.

=cut

    my $self   = shift;
    my $row    = $self->fetchrow_hashref or return;
    my @fields = $self->field_list       or return;

    # Aliases get read-only accessors, too.
    push @fields, map { $self->get_field_aliases( $_ ) } @fields;

    return Text::RecordParser::Object->new( \@fields, $row );
}

# ----------------------------------------------------------------
sub fetchall_arrayref {

=pod

=head2 fetchall_arrayref

  my $records = $p->fetchall_arrayref;
  for my $record ( @$records ) {
      print "Name = ", $record->[0], "\n";
  }

  my $records = $p->fetchall_arrayref( { Columns => {} } );
  for my $record ( @$records ) {
      print "Name = ", $record->{'name'}, "\n";
  }

Like DBI's fetchall_arrayref, returns an arrayref of arrayrefs.  Also
accepts optional "{ Columns => {} }" argument to return an arrayref of
hashrefs.

=cut

    my $self = shift;
    my %args
        = defined $_[0] && ref $_[0] eq 'HASH' ? %{ shift() }
        : @_ % 2 == 0                          ? @_
        :                                        ();

    my $method = ref $args{'Columns'} eq 'HASH'
        ? 'fetchrow_hashref'
        : 'fetchrow_array';

    my @return;
    while ( my $record = $self->$method() ) {
        push @return, $record;
    }

    return \@return;
}

# ----------------------------------------------------------------
sub fetchall_hashref {

=pod

=head2 fetchall_hashref

  my $records = $p->fetchall_hashref('id');
  for my $id ( keys %$records ) {
      my $record = $records->{ $id };
      print "Name = ", $record->{'name'}, "\n";
  }

Like DBI's fetchall_hashref, this returns a hash reference of hash
references.  The keys of the top-level hashref are the field values
of the field argument you supply.  The field name you supply can be
a field created by a C<field_compute>.

=cut

    my $self      = shift;
    my $key_field = shift(@_) || return croak('No key field');

    # Implicitly binds the header if fields are not yet bound; the
    # returned list itself is not needed here.
    $self->field_list;

    my ( %return, $field_ok );
    while ( my $record = $self->fetchrow_hashref ) {
        if ( !$field_ok ) {
            if ( !exists $record->{ $key_field } ) {
                croak "Invalid key field: '$key_field'";
            }
            $field_ok = 1; # validate only against the first record
        }
        $return{ $record->{ $key_field } } = $record;
    }

    return \%return;
}

# ----------------------------------------------------------------
sub fh {

=pod

=head2 fh

  open my $fh, '<', $file or die $!;
  $p->fh( $fh );

Gets or sets the filehandle of the file being read.

=cut

    my ( $self, $arg ) = @_;

    if ( defined $arg ) {
        if ( ! UNIVERSAL::isa( $arg, 'GLOB' ) ) {
            croak q[Argument to fh doesn't look like a filehandle];
        }

        if ( defined $self->{'fh'} ) {
            close $self->{'fh'} or croak "Can't close existing filehandle: $!";
        }

        $self->{'fh'}       = $arg;
        $self->{'filename'} = $EMPTY_STR; # the handle now owns the source
    }

    # Lazily open the named file on first access.
    if ( !defined $self->{'fh'} ) {
        if ( my $file = $self->filename ) {
            open my $fh, '<', $file or croak "Cannot read '$file': $!";
            $self->{'fh'} = $fh;
        }
    }

    return $self->{'fh'};
}

# ----------------------------------------------------------------
sub field_compute {

=pod

=head2 field_compute

A callback applied to the fields identified by position (or field
name if C<bind_fields> or C<bind_header> was called).

The callback will be passed two arguments:

=over 4

=item 1

The current field

=item 2

A reference to all the other fields, either as an array or hash
reference, depending on the method which you called.

=back

If data looks like this:

  parent    children
  Mike      Greg,Peter,Bobby
  Carol     Marcia,Jane,Cindy

You could split the "children" field into an array reference with the
values like so:

  $p->field_compute( 'children', sub { [ split /,/, shift() ] } );

The field position or name doesn't actually have to exist, which
means you could create new, computed fields on-the-fly.  E.g., if you
data looks like this:

    1,3,5
    32,4,1
    9,5,4

You could write a field_compute like this:

    $p->field_compute( 3,
        sub {
            my ( $cur, $others ) = @_;
            my $sum;
            $sum += $_ for @$others;
            return $sum;
        }
    );

Field "3" will be created as the sum of the other fields.  This
allows you to further write:

    my $data = $p->fetchall_arrayref;
    for my $rec ( @$data ) {
        print "$rec->[0] + $rec->[1] + $rec->[2] = $rec->[3]\n";
    }

Prints:

    1 + 3 + 5 = 9
    32 + 4 + 1 = 37
    9 + 5 + 4 = 18

=cut

    my $self = shift;

    if ( @_ ) {
        my ( $position, $callback ) = @_;

        if ( $position !~ /\S+/ ) {
            croak 'No usable field name or position';
        }

        if ( ref $callback ne 'CODE' ) {
            croak 'Callback not code reference';
        }

        $self->{'field_computes'}{ $position } = $callback;
    }

    return $self->{'field_computes'} || {};
}

# ----------------------------------------------------------------
sub field_filter {

=pod

=head2 field_filter

  $p->field_filter( sub { $_ = shift; uc(lc($_)) } );

A callback which is applied to each field.  The callback will be
passed the current value of the field.  Whatever is passed back will
become the new value of the field.  The above example capitalizes
field values.  To unset the filter, pass in the empty string.

=cut

    my ( $self, $filter ) = @_;

    if ( defined $filter ) {
        if ( $filter eq $EMPTY_STR ) {
            $self->{'field_filter'} = $EMPTY_STR; # allows nullification
        }
        elsif ( ref $filter eq 'CODE' ) {
            $self->{'field_filter'} = $filter;
        }
        else {
            croak q[Argument to field_filter doesn't look like code];
        }
    }

    return $self->{'field_filter'} || $EMPTY_STR;
}

# ----------------------------------------------------------------
sub field_list {

=pod

=head2 field_list

  $p->bind_fields( qw[ foo bar baz ] );
  my @fields = $p->field_list;
  print join ', ', @fields; # prints "foo, bar, baz"

Returns the fields bound via C<bind_fields> (or C<bind_header>).

=cut

    my $self = shift;

    if ( !$self->{'fields_bound'} ) {
        $self->bind_header; # implicitly bind from the file's first line
    }

    if ( ref $self->{'field_pos_ordered'} eq 'ARRAY' ) {
        return @{ $self->{'field_pos_ordered'} };
    }
    else {
        croak 'No fields. Call "bind_fields" or "bind_header" first.';
    }
}

# ----------------------------------------------------------------
sub field_positions {

=pod

=head2 field_positions

  my %positions = $p->field_positions;

Returns a hash of the fields and their positions bound via
C<bind_fields> (or C<bind_header>).  Mostly for internal use.

=cut

    my $self = shift;

    if ( ref $self->{'field_pos'} eq 'HASH' ) {
        return %{ $self->{'field_pos'} };
    }
    else {
        return;
    }
}

# ----------------------------------------------------------------
sub field_separator {

=pod

=head2 field_separator

  $p->field_separator("\t");     # splits fields on tabs
  $p->field_separator('::');     # splits fields on double colons
  $p->field_separator(qr/\s+/);  # splits fields on whitespace
  my $sep = $p->field_separator; # returns the current separator

Gets and sets the token to use as the field delimiter.  Regular
expressions can be specified using qr//.  If not specified, it will
take a guess based on the filename extension ("comma" for ".txt,"
".dat," or ".csv"; "tab" for ".tab").  The default is a comma.

=cut

    my $self = shift;

    if ( @_ ) {
        $self->{'field_separator'} = shift;
    }

    if ( !$self->{'field_separator'} ) {
        my $guess;
        if ( my $filename = $self->filename ) {
            if ( $filename =~ /\.(csv|txt|dat)$/ ) {
                $guess = q{,};
            }
            elsif ( $filename =~ /\.tab$/ ) {
                $guess = qq{\t};
            }
        }

        if ( $guess ) {
            $self->{'field_separator'} = $guess;
        }
    }

    return $self->{'field_separator'} || $COMMA;
}

# ----------------------------------------------------------------
sub filename {

=pod

=head2 filename

  $p->filename('/path/to/file.dat');

Gets or sets the complete path to the file to be read.  If a file is
already opened, then the handle on it will be closed and a new one
opened on the new file.

=cut

    my $self = shift;

    if ( my $filename = shift ) {
        if ( -d $filename ) {
            croak "Cannot use directory '$filename' as input source";
        }
        elsif ( -f _ and -r _ ) { # "_" reuses the stat from -d above
            if ( my $fh = $self->fh ) {
                if ( !close($fh) ) {
                    croak "Can't close previously opened filehandle: $!\n";
                }

                $self->{'fh'} = undef;
                $self->bind_fields([]); # flush fields bound to the old file
            }
            $self->{'filename'} = $filename;
        }
        else {
            croak "Cannot use '$filename' as input source: ",
                  'file does not exist or is not readable.';
        }
    }

    return $self->{'filename'} || $EMPTY_STR;
}

# ----------------------------------------------------------------
sub get_field_aliases {

=pod

=head2 get_field_aliases

  my @aliases = $p->get_field_aliases('name');

Allows you to define alternate names for fields, e.g., sometimes your
input file calls city "town" or "township," sometimes a file uses
"Moniker" instead of "name."

=cut

    my $self       = shift;
    my $field_name = shift or return;

    if ( !$self->{'field_alias'} ) {
        return;
    }

    return @{ $self->{'field_alias'}{ $field_name } || [] };
}

# ----------------------------------------------------------------
sub header_filter {

=pod

=head2 header_filter

  $p->header_filter( sub { $_ = shift; s/\s+/_/g; lc $_ } );

A callback applied to column header names.  The callback will be
passed the current value of the header.  Whatever is returned will
become the new value of the header.  The above example collapses
spaces into a single underscore and lowercases the letters.  To
unset a filter, pass in the empty string.

=cut

    my ( $self, $filter ) = @_;

    if ( defined $filter ) {
        if ( $filter eq $EMPTY_STR ) {
            $self->{'header_filter'} = $EMPTY_STR; # allows nullification
        }
        elsif ( ref $filter eq 'CODE' ) {
            $self->{'header_filter'} = $filter;

            # Re-bind any already-bound fields through the new filter.
            if ( my %field_pos = $self->field_positions ) {
                my @new_order;
                while ( my ( $field, $order ) = each %field_pos ) {
                    my $xform = $filter->( $field );
                    $new_order[ $order ] = $xform;
                }

                $self->bind_fields( @new_order );
            }
        }
        else {
            # Message previously said "field_filter" (copy-paste error).
            croak q[Argument to header_filter doesn't look like code];
        }
    }

    return $self->{'header_filter'} || $EMPTY_STR;
}

# ----------------------------------------------------------------
sub record_separator {

=pod

=head2 record_separator

  $p->record_separator("\n//\n");
  $p->field_separator("\n");

Gets and sets the token to use as the record separator.  The default
is a newline ("\n").

The above example would read a file that looks like this:

  field1
  field2
  field3
  //
  data1
  data2
  data3
  //

=cut

    my $self = shift;

    if ( @_ ) {
        $self->{'record_separator'} = shift;
    }

    return $self->{'record_separator'} || $NEW_LINE;
}

# ----------------------------------------------------------------
sub set_field_alias {

=pod

=head2 set_field_alias

  $p->set_field_alias({
      name => 'Moniker,handle',        # comma-separated string
      city => [ qw( town township ) ], # or anonymous arrayref
  });

Allows you to define alternate names for fields, e.g., sometimes your
input file calls city "town" or "township," sometimes a file uses
"Moniker" instead of "name."

=cut

    my $self     = shift;
    my %args     = ref $_[0] eq 'HASH' ? %{ $_[0] } : @_;
    my %is_field = map { $_, 1 } $self->field_list;

    ARG:
    while ( my ( $field_name, $aliases ) = each %args ) {
        if ( ref $aliases ne 'ARRAY' ) {
            $aliases = [ split( /,/, $aliases ) ];
        }

        if ( !$is_field{ $field_name } ) {
            # The given "field" may itself be an alias of a real
            # field; swap it with the first alias that is real.
            push @$aliases, $field_name;
            ( $field_name ) = grep { $is_field{ $_ } } @$aliases;
            next ARG unless $field_name;
        }

        $self->{'field_alias'}{ $field_name } = [
            grep { $_ ne $field_name } uniq( @$aliases )
        ];
    }

    return 1;
}

# ----------------------------------------------------------------
sub trim {

=pod

=head2 trim

  my $trim_value = $p->trim(1);

Provide "true" argument to remove leading and trailing whitespace
from fields.  Use a "false" argument to disable.

=cut

    my ( $self, $arg ) = @_;

    if ( defined $arg ) {
        $self->{'trim'} = $arg ? 1 : 0;
    }

    return $self->{'trim'};
}

1;

# ----------------------------------------------------------------
# I must Create a System, or be enslav'd by another Man's;
# I will not Reason and Compare; my business is to Create.
#   -- William Blake, "Jerusalem"
# ----------------------------------------------------------------

=pod

=head1 AUTHOR

Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>

=head1 SOURCE

http://github.com/kyclark/text-recordparser

=head1 CREDITS

Thanks to the following:

=over 4

=item * Benjamin Tilly

For Text::xSV, the inspirado for this module

=item * Tim Bunce et al.

For DBI, from which many of the methods were shamelessly stolen

=item * Tom Aldcroft

For contributing code to make it easy to parse whitespace-delimited data

=item * Liya Ren

For catching the column-ordering error when parsing with "no-headers"

=item * Sharon Wei

For catching bug in C<extract> that sets up infinite loops

=item * Lars Thegler

For bug report on missing "script_files" arg in Build.PL

=back

=head1 BUGS

None known.  Please use http://rt.cpan.org/ for reporting bugs.

=head1 LICENSE AND COPYRIGHT

Copyright (C) 2006-10 Ken Youens-Clark.  All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

=cut
Text-RecordParser-v1.6.3/lib/Text/RecordParser000755000765000024 012201220566 21040 5ustar00kclarkstaff000000000000Text-RecordParser-v1.6.3/lib/Text/RecordParser/Object.pm000444000765000024 423012201220566 22740 0ustar00kclarkstaff000000000000package Text::RecordParser::Object;

use strict;
use warnings;
use version;
use base qw( Class::Accessor );

our $VERSION = version->new('1.4.0');

# Bless the already-parsed record hashref into a class that has a
# read-only accessor for every field name (and alias) in the input.
sub new {
    my ( $class, $field_names, $self ) = @_;
    $class->mk_ro_accessors( @$field_names );
    bless $self, $class;
    return $self;
}

1;

__END__

# ----------------------------------------------------------------

=pod

=head1 NAME

Text::RecordParser::Object - read delimited text files as objects

=head1 SYNOPSIS

  my $o    = $p->fetchrow_object;
  my $name = $o->name;

=head1 METHODS

=head2 new

Just call "fetchrow_object" on a Text::RecordParser object to
instantiate an object.

=head1 DESCRIPTION

This module extends the idea of how you interact with delimited text
files, allowing you to enforce field names and identify field aliases
easily.  That is, if you are using the "fetchrow_hashref" method to
read each line, you may misspell the hash key and introduce a bug in
your code.  With this module, Perl will throw an error if you attempt
to read a field not defined in the file's headers.  Additionally, any
defined field aliases will be created as additional accessor methods.

As much as I like the full encapsulation of inside-out objects (e.g.,
as described in I<Perl Best Practices> by Damian Conway and provided
by Class::Std), I couldn't figure out a way to dynamically create the
class at runtime.  Besides, I figure this interface is only for those
who want to use the overhead of objects to enforce policy.  If you
use this module and still access the hash underneath the object, I
can't really help you.

=head1 SEE ALSO

Class::Accessor.

=head1 AUTHOR

Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>

=head1 LICENSE AND COPYRIGHT

Copyright (C) 2009-10 Ken Youens-Clark.  All rights reserved.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

=cut
Text-RecordParser-v1.6.3/lib/Text/RecordParser/Tab.pm000444000765000024 272112201220566 22243 0ustar00kclarkstaff000000000000package Text::RecordParser::Tab;

use strict;
use warnings;
use version;
use base qw( Text::RecordParser );

our $VERSION = version->new('1.4.0');

# ----------------------------------------------------------------
# Identical to the parent class's constructor except that the field
# separator is preset to a tab character.
sub new {
    my $class = shift;
    my $self  = $class->SUPER::new( @_ );
    $self->field_separator("\t");
    return $self;
}

1;

__END__

# ----------------------------------------------------------------

=pod

=head1 NAME

Text::RecordParser::Tab - read tab-delimited files

=head1 SYNOPSIS

  use Text::RecordParser::Tab;

=head1 DESCRIPTION

This module is a shortcut for getting a tab-delimited parser.

=head2 new

Call "new" as normal but without worrying about "field_separator"
or "fs."

Because this:

  my $p = Text::RecordParser::Tab->new($file);

Is easier to type than this

  my $p = Text::RecordParser->new(
      filename        => $file,
      field_separator => "\t",
  );

=head1 AUTHOR

Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>

=head1 LICENSE AND COPYRIGHT

Copyright (C) 2006-10 Ken Youens-Clark.  All rights reserved.
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. =cut Text-RecordParser-v1.6.3/t000755000765000024 012201220566 15216 5ustar00kclarkstaff000000000000Text-RecordParser-v1.6.3/t/00-pipe.t000444000765000024 131512201220566 16712 0ustar00kclarkstaff000000000000#!/usr/bin/perl # # test for "field_separator" and "record_separator" # use strict; use FindBin '$Bin'; use Test::More tests => 7; use Text::RecordParser; my $p = Text::RecordParser->new( ); is( $p->field_separator, ',', 'Field separator is comma' ); is( $p->field_separator("\t"), "\t", 'Field separator is tab' ); is( $p->field_separator('::'), '::', 'Field separator is double colon' ); is( ref $p->field_separator(qr/\s+/), 'Regexp', 'Field separator is a regular expression' ); is( $p->record_separator, "\n", 'Record separator is newline' ); is( $p->record_separator("\n\n"), "\n\n", 'Record separator is double newline' ); is( $p->record_separator(':'), ':', 'Record separator is colon' ); Text-RecordParser-v1.6.3/t/01-new.t000444000765000024 577112201220566 16561 0ustar00kclarkstaff000000000000#!perl use strict; use File::Spec::Functions; use FindBin '$Bin'; use Readonly; use Test::Exception; use Test::More tests => 26; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); require_ok( 'Text::RecordParser' ); require_ok( 'Text::RecordParser::Tab' ); # # Vanilla "new," test defaults # { my $p = Text::RecordParser->new; isa_ok( $p, 'Text::RecordParser' ); is( $p->filename, '', 'Filename is blank' ); is( $p->fh, undef, 'Filehandle is undefined' ); is( $p->field_filter, '', 'Field filter is blank' ); is( $p->header_filter, '', 'Header filter is blank' ); is( 
$p->field_separator, ',', 'Default separator is a comma' ); is( $p->trim, undef, 'Default trim value is undefined' ); } # # New with arguments # { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new($file); is( $p->filename, $file, 'Filename sets OK' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( { filename => $file } ); is( $p->filename, $file, 'Filename as hashref sets OK' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( filename => $file, field_separator => "\t", record_separator => "\n\n", field_filter => sub { $_ = shift; s/ /_/g; $_ }, header_filter => sub { $_ = shift; s/\s+/_/g; lc $_ }, trim => 1, ); is( $p->filename, $file, 'Filename set OK' ); is( $p->field_separator, "\t", 'Field separator is a tab' ); is( $p->record_separator, "\n\n", 'Record separator is two newlines' ); is( ref $p->field_filter, 'CODE', 'Field filter is code' ); is( ref $p->header_filter, 'CODE', 'Header filter is code' ); is( $p->trim, 1, 'Trim mode is on' ); } { my $p = Text::RecordParser->new; is( $p->trim, undef, 'trim with no args is undefined' ); is( $p->trim('foo'), 1, 'trim with non-false arg is true' ); is( $p->trim(''), 0, 'trim with false arg is false' ); } # # New with shortened arguments # { my $p = Text::RecordParser->new({ fs => "\t", rs => "\n\n", }); is( $p->field_separator, "\t", 'Shortened field separator arg OK' ); is( $p->record_separator, "\n\n", 'Shortened record separator arg OK' ); } # # New with too many arguments # { throws_ok { my $p = Text::RecordParser->new( filename => catfile( $TEST_DATA_DIR, 'simpsons.csv' ), data => "foo\tbar\tbaz", ); } qr/too many arguments/, 'new dies because of too many data args'; } # # New with just one arg # { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); is( $p->filename, $file, 'One argument taken as filename' ); } # # New Tab # { my $p = 
Text::RecordParser::Tab->new; isa_ok( $p, 'Text::RecordParser' ); is( $p->field_separator, "\t", 'New T::RP::Tab has tab for field sep' ); } Text-RecordParser-v1.6.3/t/02-filename-fh.t000444000765000024 1302412201220566 20152 0ustar00kclarkstaff000000000000#!perl # # tests for "filename," "fh," "data," etc. # use strict; use File::Spec::Functions; use File::Temp qw( tempfile ); use FindBin qw( $Bin ); use IO::File; use Readonly; use Test::Exception; use Test::More tests => 41; use Text::RecordParser; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $p = Text::RecordParser->new; is( $p->filename, '', 'Filename is blank' ); my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); is( $p->filename($file), $file, 'Filename sets OK' ); throws_ok { $p->filename($TEST_DATA_DIR) } qr/cannot use dir/i, 'filename rejects directory for argument'; my $bad_file = catfile( $TEST_DATA_DIR, 'non-existent' ); throws_ok { $p->filename($bad_file) } qr/file does not exist/i, 'filename rejects non-existent file'; my @fields = $p->field_list; ok( @fields, 'Got field list' ); my $file2 = catfile( $TEST_DATA_DIR, 'simpsons.tab' ); $p->filename( $file2 ); my @fields2 = $p->field_list; ok( join(',', @fields) ne join(',', @fields2), 'Field list is flushed when resetting filename' ); } # # Filehandle tests # { my $p = Text::RecordParser->new; my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); open my $fh, '<', $file or die "Read of '$file' failed: $!"; is ( ref $p->fh( $fh ), 'GLOB', 'fh is a filehandle' ); # Cause an error by closing the existing fh. 
close $fh; my $tabfile = catfile( $TEST_DATA_DIR, 'simpsons.tab' ); open my $fh2, '<', $tabfile or die "Read of '$tabfile' failed: $!"; throws_ok { $p->fh( $fh2 ) } qr/can't close existing/i, 'fh catches bad close'; throws_ok { $p->fh('') } qr/doesn't look like a filehandle/i, 'fh catches bad arg'; my $file3 = catfile( $TEST_DATA_DIR, 'simpsons.cvs'); my $io = IO::File->new( $file3 ); is ( ref $p->fh( $io ), 'GLOB', 'fh is a filehandle' ); } { # cause an error on a closed filehandle my ( $fh, $filename ) = tempfile(); my $p = Text::RecordParser->new( fh => $fh ); close $fh; unlink $filename; my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); throws_ok { $p->filename( $file ) } qr/Can't close previously opened/, 'filename dies trying to close a closed filehandle'; } { # cause an error on a disappearing file my $p = Text::RecordParser->new; my ( $fh, $filename ) = tempfile(); $p->filename( $filename ); close $fh; unlink $filename; throws_ok { my $data = $p->fh } qr/Cannot read '\Q$filename\E'/, 'fh dies on bad file'; } # # Data tests # { my $p = Text::RecordParser->new; throws_ok { $p->data() } qr/without any arguments/, 'data called without args dies'; } { my $p = Text::RecordParser->new; throws_ok { $p->data('') } qr/no usable/i, 'data dies with no usable data'; } { my $p = Text::RecordParser->new; my $scalar = "lname,fname,age\nSmith,Joan,20\nDoe,James,21\n"; $p->data( \$scalar ); $p->bind_header; my @fields = $p->field_list; is( scalar @fields, 3, 'data accepted scalar ref' ); } { my $p = Text::RecordParser->new; $p->data( "lname,fname,age\n", "Smith,Joan,20\nDoe,James,21\n" ); $p->bind_header; my @fields = $p->field_list; is( scalar @fields, 3, 'data accepted an array' ); } { my $p = Text::RecordParser->new; my @array = ( "lname,fname,age\n", "Smith,Joan,20\nDoe,James,21\n" ); $p->data( \@array ); $p->bind_header; my @fields = $p->field_list; is( scalar @fields, 3, 'data accepted an array ref' ); } { my $p = Text::RecordParser->new; my $scalar = 
"lname,fname,age\nSmith,Joan,20\nDoe,James,21\n"; ok( $p->data( $scalar ), 'data accepts a scalar' ); $p->bind_header; my @fields = $p->field_list; is( $fields[0], 'lname', 'lname field' ); is( $fields[1], 'fname', 'fname field' ); is( $fields[2], 'age', 'age field' ); my $rec = $p->fetchrow_hashref; is( $rec->{'lname'}, 'Smith', 'lname = "Smith"' ); is( $rec->{'fname'}, 'Joan', 'fname = "Joan"' ); is( $rec->{'age'}, '20', 'age = "20"' ); $rec = $p->fetchrow_array; is( $rec->[0], 'Doe', 'lname = "Doe"' ); is( $rec->[1], 'James', 'fname = "James"' ); is( $rec->[2], '21', 'age = "21"' ); $p->data( "name\tinstrument\n", "Miles Davis\ttrumpet\n", "Art Blakey\tdrums\n" ); $p->field_separator("\t"); $p->bind_header; @fields = $p->field_list; is( $fields[0], 'name', 'name field' ); is( $fields[1], 'instrument', 'instrument field' ); $rec = $p->fetchrow_array; is( $rec->[0], 'Miles Davis', 'name = "Miles Davis"' ); is( $rec->[1], 'trumpet', 'instrument = "trumpet"' ); $rec = $p->fetchrow_hashref; is( $rec->{'name'}, 'Art Blakey', 'name = "Art Blakey"' ); is( $rec->{'instrument'}, 'drums', 'instrument = "drums"' ); my $filename = "$Bin/data/simpsons.csv"; open my $fh, "<$filename" or die "Can't read '$filename': $!"; is ( $p->data( $fh ), 1, 'data accepts a filehandle' ); is ( UNIVERSAL::isa( $p->fh, 'GLOB' ), 1, 'fh is a GLOB' ); } { my $p = Text::RecordParser->new( data => "lname,fname,age\nSmith,Joan,20\nDoe,James,21\n" ); $p->bind_header; my @fields = $p->field_list; is( $fields[0], 'lname', 'lname field' ); is( $fields[1], 'fname', 'fname field' ); is( $fields[2], 'age', 'age field' ); my $rec = $p->fetchrow_hashref; is( $rec->{'lname'}, 'Smith', 'lname = "Smith"' ); is( $rec->{'fname'}, 'Joan', 'fname = "Joan"' ); is( $rec->{'age'}, '20', 'age = "20"' ); } Text-RecordParser-v1.6.3/t/03-separator.t000444000765000024 175712201220566 17772 0ustar00kclarkstaff000000000000#!perl # # test for "field_separator" and "record_separator" # use strict; use File::Spec::Functions; 
use FindBin qw( $Bin ); use Test::More tests => 8; use Text::RecordParser; use Readonly; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $p = Text::RecordParser->new; is( $p->field_separator, ',', 'Field separator is comma' ); is( $p->field_separator("\t"), "\t", 'Field separator is tab' ); is( $p->field_separator('::'), '::', 'Field separator is double colon' ); is( ref $p->field_separator(qr/\s+/), 'Regexp', 'Field separator is a regular expression' ); is( $p->record_separator, "\n", 'Record separator is newline' ); is( $p->record_separator("\n\n"), "\n\n", 'Record separator is double newline' ); is( $p->record_separator(':'), ':', 'Record separator is colon' ); } { my $p2 = Text::RecordParser->new(catfile($TEST_DATA_DIR, 'simpsons.tab')); is( $p2->field_separator("\t"), "\t", 'Field separator guessed tab' ); } Text-RecordParser-v1.6.3/t/04-bind.t000444000765000024 407312201220566 16701 0ustar00kclarkstaff000000000000#!perl # # tests for "bind_fields" and "bind_header" # use strict; use File::Spec::Functions; use FindBin '$Bin'; use Test::Exception; use Test::More tests => 16; use Text::RecordParser; use Readonly; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $p = Text::RecordParser->new; throws_ok { my @field_list = $p->field_list } qr/no file/i, 'Error on "field_list" with no file'; is( $p->bind_fields(qw[ foo bar baz ]), 1, 'Bind fields successful' ); my @fields = $p->field_list; is( $fields[0], 'foo', 'Field "foo"' ); is( $fields[1], 'bar', 'Field "bar"' ); is( $fields[2], 'baz', 'Field "baz"' ); my $f1 = catfile($TEST_DATA_DIR, 'simpsons.csv'); $p->filename( $f1 ); is( $p->bind_header, 1, 'Bind header successful' ); @fields = $p->field_list; is( $fields[0], 'Name', 'Field "Name"' ); is( $fields[2], 'City', 'Field "City"' ); is( $fields[-1], 'Dependents', 'Field "Dependents"' ); } { my $p = Text::RecordParser->new; throws_ok { $p->bind_fields() } qr/called without field list/i, 'Error on bind_field without args'; } { my $p = 
Text::RecordParser->new; my %pos1 = $p->field_positions; ok( !%pos1, 'No field positions with unbound headers' ); $p->bind_fields( qw[ foo bar baz ] ); my %pos2 = $p->field_positions; my %should_be = ( foo => 0, bar => 1, baz => 2, ); is_deeply( \%pos2, \%should_be, 'field positions OK' ); } { my $empty_file = catfile( $TEST_DATA_DIR, 'empty' ); my $p = Text::RecordParser->new( $empty_file ); throws_ok { $p->bind_header() } qr/can't find columns in file/i, 'Error on bind_header with empty file'; } { my $p = Text::RecordParser->new; $p->field_separator("\t"); my $f2 = catfile($TEST_DATA_DIR, 'simpsons.tab'); $p->filename( $f2 ); ok( my @fields = $p->field_list, 'bind_header implicitly called' ); is( scalar @fields, 7, 'Found seven fields' ); is( join(',', @fields), 'Name,Address,City,State,Wife,Children,Pets', 'Fields OK'); } Text-RecordParser-v1.6.3/t/05-fetch.t000444000765000024 1547112201220566 17103 0ustar00kclarkstaff000000000000#!perl # # tests for "extract" and "fetch*" methods # use strict; use File::Spec::Functions; use FindBin '$Bin'; use Readonly; use Test::Exception; use Test::More tests => 38; use Text::RecordParser; use Text::RecordParser::Tab; use Text::RecordParser::Object; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); $p->bind_header; # Extract nothing my $undef = $p->extract; is( $undef, undef, 'Fetched nothing' ); # Extract one thing my $name = $p->extract('Name'); is( $name, '"Simpson, Homer"', 'Name is "Simpson, Homer"' ); # Extract several things my ( $address, $city ) = $p->extract(qw[ Address City ]); is( $address, '748 Evergreen Terrace', 'Address is "748 Evergreen Terrace"' ); is( $city, 'Springfield', 'City is "Springfield"' ); } { my $file = catfile( $TEST_DATA_DIR, 'empty' ); my $p = Text::RecordParser->new( $file ); throws_ok { my $data = $p->extract( qw[ foo ] ) } qr/Can't find columns/, 'extract dies without bound fields'; 
$p->bind_fields( qw[ foo bar baz ] ); my $data = $p->extract( qw[ foo ] ); is( $data, undef, 'extract returns undef on read of empty file' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); $p->bind_header; throws_ok { my $data = $p->extract('non-existent-field') } qr/invalid field/i, 'extract dies on bad field request'; } { my $file = catfile( $TEST_DATA_DIR, 'bad-file' ); my $p = Text::RecordParser->new( $file ); lives_ok { my @row = $p->fetchrow_array } 'fetchrow_array does not die reading unescaped quote'; } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); my $row = $p->fetchrow_hashref; is( $row->{'City'}, 'Springfield', 'fetchrow_hashref works without binding fields' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); $p->bind_header; my @row = $p->fetchrow_array; is( $row[0], '"Simpson, Homer"', 'Field "Simpson, Homer"' ); is( $row[1], '747 Evergreen Terrace', 'Field "747 Evergreen Terrace"' ); is( $row[-1], q["Bart,Lisa,Maggie,Santa's Little Helper"], 'Correct dependents list' ); my $row = $p->fetchrow_hashref; is( $row->{'Name'}, '"Flanders, Ned"', 'Name is "Flanders, Ned"' ); is( $row->{'City'}, 'Springfield', 'City is "Springfield"' ); is( $row->{'State'}, '', 'State is empty' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); $p->set_field_alias({ Moniker => 'Name,Name', City => [ qw( town township ) ], }); my @aliases = $p->get_field_aliases('City'); is(join(',', @aliases), 'town,township', 'City => town,township'); my $row = $p->fetchrow_hashref; is( $row->{'Moniker'}, '"Simpson, Homer"', 'Moniker alias for Name' ); is( $row->{'town'}, 'Springfield', 'town alias for city' ); is( $row->{'township'}, 'Springfield', 'township alias for city' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); 
$p->bind_header; my $data = $p->fetchall_arrayref; is( scalar @$data, 2, 'fetchall_arrayref gets 2 records' ); my $row = $data->[0]; is( $row->[0], '"Simpson, Homer"', 'Field "Simpson, Homer"' ); is( $row->[1], '747 Evergreen Terrace', 'Field "747 Evergreen Terrace"' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv'); my $p = Text::RecordParser->new( $file ); $p->bind_header; my $data = $p->fetchall_arrayref( { Columns => {} } ); is( scalar @$data, 2, 'fetchall_hashref gets 2 records' ); my $row = $data->[1]; is( $row->{'Name'}, '"Flanders, Ned"', 'Name is "Flanders, Ned"' ); is( $row->{'City'}, 'Springfield', 'City is "Springfield"' ); is( $row->{'State'}, '', 'State is empty' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); $p->bind_header; my $data = $p->fetchall_arrayref('Bad'); is( scalar @$data, 2, 'fetchall_arrayref ignores bad param' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv'); my $p = Text::RecordParser->new( $file ); $p->bind_header; throws_ok { my $data = $p->fetchall_hashref('Bad Field') } qr/Invalid key field/, 'fetchall_hashref dies on bad field'; } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); $p->bind_header; my $data = $p->fetchall_hashref('Name'); is( scalar keys %$data, 2, 'fetchall_hashref gets 2 records' ); my $row = $data->{'"Simpson, Homer"'}; is( $row->{'Wife'}, 'Marge', 'Wife is "Marge"' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); $p->bind_header; $p->field_compute( 'crazy_name', sub { my ( $field, $others ) = @_; my $name = $others->{'Name'}; $name =~ s/"//g; $name =~ s/^.*,\s+//g; return "Crazy $name!"; } ); my $data = $p->fetchall_hashref('crazy_name'); is( scalar keys %$data, 2, 'fetchall_hashref gets 2 records' ); my $row = $data->{'Crazy Homer!'}; is( $row->{'Wife'}, 'Marge', 'Wife is "Marge"' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.ssv' 
); my $p = Text::RecordParser->new( trim => 1, field_separator => qr/\s+/, filename => $file, ); $p->bind_header; my $row = $p->fetchrow_hashref; is( $row->{'Address'}, '747 Evergreen Terrace', 'Address is "747 Evergreen Terrace"' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.pdd' ); my $p = Text::RecordParser->new( trim => 1, field_separator => '|', filename => $file, ); $p->bind_header; my $row = $p->fetchrow_hashref; is( $row->{'Address'}, '747 Evergreen Terrace', 'Address is "747 Evergreen Terrace"' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.tab' ); my $p = Text::RecordParser::Tab->new( $file ); my $row = $p->fetchrow_hashref; is( $row->{'Pets'}, q[Snowball(s),Santa's Little Helper], 'Pets OK (apostrophe backslashed-unescaped)' ); } { my $p = Text::RecordParser->new( { fh => \*DATA } ); my $o1 = $p->fetchrow_object; is( $o1->name, 'moose', 'moose OK' ); my $o2 = $p->fetchrow_object; is( $o2->name, 'poodle', 'poodle OK' ); my $o3 = $p->fetchrow_object; is( $o3, undef, 'No problem reading off the end' ); } __DATA__ name,id moose,1 poodle,2 Text-RecordParser-v1.6.3/t/06-filter.t000444000765000024 335512201220566 17256 0ustar00kclarkstaff000000000000#!perl # # tests for "header_filter" and "field_filter" # use strict; use File::Spec::Functions; use FindBin '$Bin'; use Readonly; use Test::Exception; use Test::More tests => 14; use Text::RecordParser; Readonly my $EMPTY_STR => q{}; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $p = Text::RecordParser->new; is( $p->header_filter, $EMPTY_STR, 'Header filter is blank' ); throws_ok { $p->header_filter('foo') } qr/doesn't look like code/, 'Header filter rejects bad argument'; $p->bind_fields( qw[ One Two Three ] ); is( ref $p->header_filter( sub { lc shift } ), 'CODE', 'Header filter takes value' ); is( join(',', $p->field_list), 'one,two,three', 'setting header filter after binding fields changes field names' ); is( $p->header_filter($EMPTY_STR), $EMPTY_STR, 'Header filter resets to nothing' 
); is( $p->field_filter, $EMPTY_STR, 'Field filter is blank' ); throws_ok { $p->field_filter('foo') } qr/doesn't look like code/, 'Field filter rejects bad argument'; is( ref $p->field_filter( sub { lc shift } ), 'CODE', 'Field filter takes value' ); is( $p->field_filter($EMPTY_STR), $EMPTY_STR, 'Field filter resets to nothing' ); $p->header_filter( sub { lc shift } ); $p->field_filter( sub { uc shift } ); $p->filename( catfile( $TEST_DATA_DIR, 'simpsons.csv' ) ); $p->bind_header; my @fields = $p->field_list; is( $fields[0], 'name', 'Field "name"' ); is( $fields[2], 'city', 'Field "city"' ); is( $fields[-1], 'dependents', 'Field "dependents"' ); my @row = $p->fetchrow_array; is( $row[2], 'SPRINGFIELD', 'City is "SPRINGFIELD"' ); is( $row[4], 'MARGE', 'Wife is "MARGE"' ); } Text-RecordParser-v1.6.3/t/07-parse.t000444000765000024 220412201220566 17074 0ustar00kclarkstaff000000000000#!perl # # tests for alternate parsing # use strict; use File::Spec::Functions; use FindBin qw( $Bin ); use Readonly; use Test::More tests => 4; use Text::RecordParser; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $file = catfile( $TEST_DATA_DIR, 'simpsons.tab' ); my $p = Text::RecordParser->new( filename => $file, field_separator => "\t", ); $p->bind_header; my $row = $p->fetchrow_hashref; is( $row->{'Wife'}, 'Marge', 'Wife is Marge' ); } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.alt' ); my $p = Text::RecordParser->new( filename => $file, field_separator => "\n", record_separator => "\n//\n", ); $p->bind_header; my $row = $p->fetchrow_hashref; is( $row->{'Wife'}, 'Marge', 'Wife is still Marge' ); } { my $file = catfile( $TEST_DATA_DIR, 'pipe.dat' ); my $p = Text::RecordParser->new( filename => $file, field_separator => qr/\|/, ); my $row = $p->fetchrow_array; is( $row->[0], 'MSH', 'First field is "MSH"' ); is( $row->[-1], '2.2', 'Last field is "2.2"' ); } Text-RecordParser-v1.6.3/t/08-compute.t000444000765000024 403712201220566 17445 
0ustar00kclarkstaff000000000000#!perl # # tests for field and record compute # use strict; use File::Spec::Functions; use FindBin qw( $Bin ); use Readonly; use Test::Exception; use Test::More tests => 9; use Text::RecordParser; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $p = Text::RecordParser->new; throws_ok { $p->field_compute( '', 'foo' ) } qr/no usable field/i, 'field_compute dies on no field name'; } { my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( filename => $file, header_filter => sub { lc shift }, field_filter => sub { $_ = shift; s/^\s+|\s+$//g; s/"//g; $_ }, ); $p->bind_header; throws_ok { $p->field_compute( 'dependents', 'foo' ) } qr/not code/i, 'field_compute rejects not code'; $p->field_compute( 'dependents', sub { [ split /,/, shift() ] } ); $p->field_compute( 'wife', sub { my ( $field, $others ) = @_; my $husband = $others->{'name'} || ''; $husband =~ s/^.*?,\s*//; return $field.', wife of '.$husband; } ); my $row = $p->fetchrow_hashref; my $dependents = $row->{'dependents'}; is( scalar @{ $dependents || [] }, 4, 'Four dependents' ); is( $dependents->[0], 'Bart', 'Firstborn is Bart' ); is( $dependents->[-1], q[Santa's Little Helper], q[Last is Santa's Little Helper] ); is( $row->{'wife'}, 'Marge, wife of Homer', q[Marge is still Homer's wife] ); } { my $file = catfile( $TEST_DATA_DIR, 'numbers.csv' ); my $p = Text::RecordParser->new( $file ); $p->field_compute( 3, sub { my ( $cur, $others ) = @_; my $sum; $sum += $_ for @$others; return $sum; } ); my $data = $p->fetchall_arrayref; my $rec = $data->[0]; is( $rec->[-1], 9, 'Sum is 9' ); $rec = $data->[1]; is( $rec->[-1], 37, 'Sum is 37' ); $rec = $data->[2]; is( $rec->[-1], 18, 'Sum is 18' ); } Text-RecordParser-v1.6.3/t/09-comment.t000444000765000024 225012201220566 17427 0ustar00kclarkstaff000000000000#!perl # # tests for skipping records matching a comment regex # use strict; use File::Spec::Functions; use FindBin qw( $Bin ); use Readonly; use 
Test::Exception; use Test::More tests => 5; use Text::RecordParser; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $p = Text::RecordParser->new; throws_ok { $p->comment('foo') } qr/look like a regex/i, '"comment" rejects non-regex argument'; } { my $file = catfile( $TEST_DATA_DIR, 'commented.dat' ); my $p = Text::RecordParser->new( filename => $file, comment => qr/^#/, ); $p->bind_header; my $row1 = $p->fetchrow_hashref; is( $row1->{'field1'}, 'foo', 'Field is "foo"' ); my $row2 = $p->fetchrow_hashref; is( $row2->{'field2'}, 'bang', 'Field is "bang"' ); } { my $file = catfile( $TEST_DATA_DIR, 'commented2.dat' ); my $p = Text::RecordParser->new( filename => $file, comment => qr/^--/, ); $p->bind_header; my $row1 = $p->fetchrow_hashref; is( $row1->{'field1'}, 'foo', 'Field is "foo"' ); my $row2 = $p->fetchrow_hashref; is( $row2->{'field2'}, 'bang', 'Field is "bang"' ); } Text-RecordParser-v1.6.3/t/10-tablify.t000444000765000024 1305612201220566 17435 0ustar00kclarkstaff000000000000#!perl use strict; use Config; use FindBin qw( $Bin ); use Readonly; use Test::More; use File::Spec::Functions; Readonly my $TEST_COUNT => 13; Readonly my $PERL => $Config{'perlpath'}; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); Readonly my $TABLIFY => catfile( $Bin, '..', 'bin', 'tablify' ); plan tests => $TEST_COUNT; ok( -e $TABLIFY, 'Script exists' ); SKIP: { eval { require Text::TabularDisplay }; if ($@) { skip 'Text::TabularDisplay not installed', $TEST_COUNT - 1; } my $data = catfile( $TEST_DATA_DIR, 'people.dat' ); ok( -e $data, 'Data file exists' ); my $nh_data = catfile( $TEST_DATA_DIR, 'people-no-header.dat' ); ok( -e $nh_data, 'Other data file exists' ); my @tests = ( { name => 'Field list', args => "--fs ',' -l $data", expected => '+-----------+-----------+ | Field No. 
| Field | +-----------+-----------+ | 1 | name | | 2 | rank | | 3 | serial_no | | 4 | is_living | | 5 | age | +-----------+-----------+ ' }, { name => 'Select fields by name', args => "--fs ',' -f name,serial_no $data", expected => '+--------+-----------+ | name | serial_no | +--------+-----------+ | George | 190293 | | Dwight | 908348 | | Attila | | | Tojo | | | Tommy | 998110 | +--------+-----------+ 5 records returned ' }, { name => 'Limit', args => "--fs ',' --limit 2 -f name,serial_no $data", expected => '+--------+-----------+ | name | serial_no | +--------+-----------+ | George | 190293 | | Dwight | 908348 | +--------+-----------+ 2 records returned ' }, { name => 'Select fields by position', args => "--fs ',' -f 1-3,5 $data", expected => '+--------+---------+-----------+------+ | name | rank | serial_no | age | +--------+---------+-----------+------+ | George | General | 190293 | 64 | | Dwight | General | 908348 | 75 | | Attila | Hun | | 56 | | Tojo | Emporor | | 87 | | Tommy | General | 998110 | 54 | +--------+---------+-----------+------+ 5 records returned ' }, { name => 'Filter with regex', args => "--fs ',' -w 'serial_no=~/^\\d{6}\$/' $data", expected => '+--------+---------+-----------+-----------+------+ | name | rank | serial_no | is_living | age | +--------+---------+-----------+-----------+------+ | George | General | 190293 | 0 | 64 | | Dwight | General | 908348 | 0 | 75 | | Tommy | General | 998110 | 1 | 54 | +--------+---------+-----------+-----------+------+ 3 records returned ' }, { name => 'Filter with Perl operator', args => "--fs ',' -w 'name eq \"Dwight\"' $data", expected => '+--------+---------+-----------+-----------+------+ | name | rank | serial_no | is_living | age | +--------+---------+-----------+-----------+------+ | Dwight | General | 908348 | 0 | 75 | +--------+---------+-----------+-----------+------+ 1 record returned ' }, { name => 'Combine filter and field selection', args => "--fs ',' -f name -w 'is_living==1' ". 
"-w 'serial_no>0' $data", expected => '+-------+ | name | +-------+ | Tommy | +-------+ 1 record returned ' }, { name => 'No headers plus filtering by position', args => "--fs ',' --no-headers -w '3 eq \"General\"' $nh_data", expected => '+--------+---------+--------+--------+--------+ | Field1 | Field2 | Field3 | Field4 | Field5 | +--------+---------+--------+--------+--------+ | George | General | 190293 | 0 | 64 | | Dwight | General | 908348 | 0 | 75 | | Tommy | General | 998110 | 1 | 54 | +--------+---------+--------+--------+--------+ 3 records returned ' }, { name => 'Vertical display', args => "--fs ',' -v $data", no_strip => 1, expected => '************ Record 1 ************ name: George rank: General serial_no: 190293 is_living: 0 age : 64 ************ Record 2 ************ name: Dwight rank: General serial_no: 908348 is_living: 0 age : 75 ************ Record 3 ************ name: Attila rank: Hun serial_no: is_living: 0 age : 56 ************ Record 4 ************ name: Tojo rank: Emporor serial_no: is_living: 0 age : 87 ************ Record 5 ************ name: Tommy rank: General serial_no: 998110 is_living: 1 age : 54 5 records returned ' }, { name => 'No headers, vertical display', args => "--fs ',' --no-headers -v --limit 1 $nh_data", no_strip => 1, expected => '************ Record 1 ************ Field1: George Field2: General Field3: 190293 Field4: 0 Field5: 64 1 record returned ' }, ); my $command = "$PERL $TABLIFY "; for my $test ( @tests ) { my $out = `$command $test->{'args'}`; unless ( $test->{'no_strip'} ) { $test->{'expected'} =~ s/^\s*//xmsg; } is( $out, $test->{'expected'}, $test->{'name'} || 'Parsing' ); } }; Text-RecordParser-v1.6.3/t/11-tabmerge.t000444000765000024 741412201220566 17553 0ustar00kclarkstaff000000000000#!perl use strict; use Config; use File::Spec::Functions; use FindBin qw( $Bin ); use Readonly; use Test::More tests => 10; Readonly my $PERL => $Config{'perlpath'}; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); 
Readonly my $TABMERGE => catfile( $Bin, '..', 'bin', 'tabmerge' ); ok( -e $TABMERGE, 'Script exists' ); SKIP: { eval { require Text::TabularDisplay }; if ($@) { skip 'Text::TabularDisplay not installed', 9; } my @files = map { catfile($TEST_DATA_DIR, "merge${_}.tab") } (1..3); for my $file ( @files ) { ok( -e $file, 'Data file "$file" exists' ); } my $data = join( ' ', @files ); my @tests = ( { name => 'List', args => "--list $data", expected => '+-----------+-------------------+ | Field | No. Times Present | +-----------+-------------------+ | lod_score | 1 | | name | 3 | | position | 3 | | type | 2 | +-----------+-------------------+ ' }, { name => 'Merge min', args => "$data", expected => '+----------+----------+ | name | position | +----------+----------+ | RM104 | 2.30 | | RM105 | 4.5 | | TX5509 | 10.4 | | UU189 | 19.0 | | Xpsm122 | 3.3 | | Xpsr9556 | 4.5 | | DRTL | 2.30 | | ALTX | 4.5 | | DWRF | 10.4 | +----------+----------+ ' }, { name => 'Merge max', args => "--max $data", expected => '+-----------+----------+----------+--------+ | lod_score | name | position | type | +-----------+----------+----------+--------+ | | RM104 | 2.30 | RFLP | | | RM105 | 4.5 | RFLP | | | TX5509 | 10.4 | AFLP | | 2.4 | UU189 | 19.0 | SSR | | 1.2 | Xpsm122 | 3.3 | Marker | | 1.2 | Xpsr9556 | 4.5 | Marker | | | DRTL | 2.30 | | | | ALTX | 4.5 | | | | DWRF | 10.4 | | +-----------+----------+----------+--------+ ' }, { name => 'Merge on named fields', args => "-f name,type $data", expected => '+----------+--------+ | name | type | +----------+--------+ | RM104 | RFLP | | RM105 | RFLP | | TX5509 | AFLP | | UU189 | SSR | | Xpsm122 | Marker | | Xpsr9556 | Marker | | DRTL | | | ALTX | | | DWRF | | +----------+--------+ ' }, { name => 'Merge on named fields and sort', args => "-f name,lod_score -s name $data", expected => '+----------+-----------+ | name | lod_score | +----------+-----------+ | ALTX | | | DRTL | | | DWRF | | | RM104 | | | RM105 | | | TX5509 | | | UU189 | 2.4 | | Xpsm122 | 
1.2 | | Xpsr9556 | 1.2 | +----------+-----------+ ' }, { name => 'Merge on named fields and sort, print stdout', args => "-f name,lod_score -s name --stdout $data", expected => 'name lod_score ALTX DRTL DWRF RM104 RM105 TX5509 UU189 2.4 Xpsm122 1.2 Xpsr9556 1.2 ' }, ); my $command = "$PERL $TABMERGE "; for my $test ( @tests ) { my $out = `$command $test->{'args'}`; $test->{'expected'} =~ s/^\s+//xmsg; is( $out, $test->{'expected'}, $test->{'name'} || 'Parsing' ); } }; Text-RecordParser-v1.6.3/t/12-object.t000444000765000024 127312201220566 17231 0ustar00kclarkstaff000000000000#!perl use strict; use Config; use File::Spec::Functions; use FindBin qw( $Bin ); use Readonly; use Test::Exception; use Test::More tests => 6; use Text::RecordParser; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); require_ok 'Text::RecordParser::Object'; my $file = catfile( $TEST_DATA_DIR, 'simpsons.csv' ); my $p = Text::RecordParser->new( $file ); my $r; ok( $r = $p->fetchrow_object, 'Got object' ); isa_ok( $r, 'Text::RecordParser::Object', 'Correct class' ); ok( $r->can('Address') && 1, 'Has the "Address" method' ); is( $r->Address, '747 Evergreen Terrace', "Address is good"); throws_ok { $r->Address('900 Oakhill Circle') } qr/cannot alter/, "Method is read-only"; Text-RecordParser-v1.6.3/t/13-tab2graph.t000444000765000024 174712201220566 17644 0ustar00kclarkstaff000000000000#!perl use strict; use Config; use File::Basename qw( basename ); use File::Spec::Functions; use FindBin qw( $Bin ); use Readonly; use Test::More; Readonly my $TEST_COUNT => 4; Readonly my $PERL => $Config{'perlpath'}; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); Readonly my $TAB2GRAPH => catfile( $Bin, '..', 'bin', 'tab2graph' ); plan tests => $TEST_COUNT; ok( -e $TAB2GRAPH, 'Script exists' ); SKIP: { eval { require GraphViz }; if ($@) { skip 'GraphViz not installed', $TEST_COUNT - 1; } my $data = catfile( $TEST_DATA_DIR, 'tabular.tab' ); ok( -e $data, 'Data file exists' ); my $out_file = catfile( $Bin, 
'foo.png' ); my $command = "$TAB2GRAPH -c -o $out_file $data 2>&1"; my $out = `$command`; my $basename = basename( $out_file ); is( $out, qq[Image created "$basename."\n], 'Diagnostic OK' ); my $file_size = -s $out_file; ok( $file_size > 0, 'File is correct size' ); unlink $out_file; }; Text-RecordParser-v1.6.3/t/14-trim.t000444000765000024 161412201220566 16737 0ustar00kclarkstaff000000000000#!perl # # tests for "trim" # use strict; use File::Spec::Functions; use FindBin '$Bin'; use Readonly; use Test::Exception; use Test::More tests => 4; use Text::RecordParser; use Text::RecordParser::Tab; Readonly my $TEST_DATA_DIR => catdir( $Bin, 'data' ); { my $file = catfile( $TEST_DATA_DIR, 'trim.csv' ); my $p = Text::RecordParser->new( $file ); my $r1 = $p->fetchrow_hashref; is( $r1->{'SerialNumber'}, '1656401 ', 'Serial number OK' ); my $r2 = $p->fetchrow_hashref; is( $r2->{'SerialNumber'}, ' ', 'Blank serial number OK' ); } { my $file = catfile( $TEST_DATA_DIR, 'trim.csv' ); my $p = Text::RecordParser->new( $file ); $p->trim(1); my $r1 = $p->fetchrow_hashref; is( $r1->{'SerialNumber'}, '1656401', 'Serial number OK' ); my $r2 = $p->fetchrow_hashref; is( $r2->{'SerialNumber'}, '', 'Blank serial number OK' ); } Text-RecordParser-v1.6.3/t/pod-coverage.t000444000765000024 25712201220566 20077 0ustar00kclarkstaff000000000000#!perl use Test::More; eval 'use Test::Pod::Coverage 1.04'; if ( $@ ) { plan skip_all => 'Test::Pod::Coverage 1.04 required for POD coverage' } all_pod_coverage_ok(); Text-RecordParser-v1.6.3/t/pod.t000444000765000024 21112201220566 16274 0ustar00kclarkstaff000000000000#!perl use Test::More; eval 'use Test::Pod 1.14'; plan skip_all => 'Test::Pod 1.14 required for testing POD' if $@; all_pod_files_ok(); Text-RecordParser-v1.6.3/t/data000755000765000024 012201220566 16127 5ustar00kclarkstaff000000000000Text-RecordParser-v1.6.3/t/data/bad-file000444000765000024 6612201220566 17614 
0ustar00kclarkstaff000000000000This,can't,be,read,without,escaping,the,single-quote. Text-RecordParser-v1.6.3/t/data/commented.dat000444000765000024 14012201220566 20704 0ustar00kclarkstaff000000000000# this is a comment field1,field2,field3 foo,bar,baz # this is another comment flip,bang,zoowie Text-RecordParser-v1.6.3/t/data/commented2.dat000444000765000024 14212201220566 20770 0ustar00kclarkstaff000000000000-- this is a comment field1,field2,field3 foo,bar,baz -- this is another comment flip,bang,zoowie Text-RecordParser-v1.6.3/t/data/empty000444000765000024 012201220566 17253 0ustar00kclarkstaff000000000000Text-RecordParser-v1.6.3/t/data/merge1.tab000444000765000024 10312201220566 20106 0ustar00kclarkstaff000000000000name type position RM104 RFLP 2.30 RM105 RFLP 4.5 TX5509 AFLP 10.4 Text-RecordParser-v1.6.3/t/data/merge2.tab000444000765000024 13712201220566 20116 0ustar00kclarkstaff000000000000name type position lod_score UU189 SSR 19.0 2.4 Xpsm122 Marker 3.3 1.2 Xpsr9556 Marker 4.5 1.2 Text-RecordParser-v1.6.3/t/data/merge3.tab000444000765000024 5312201220566 20074 0ustar00kclarkstaff000000000000name position DRTL 2.30 ALTX 4.5 DWRF 10.4 Text-RecordParser-v1.6.3/t/data/numbers.csv000444000765000024 2312201220566 20407 0ustar00kclarkstaff0000000000001,3,5 32,4,1 9,5,4 Text-RecordParser-v1.6.3/t/data/people-no-header.dat000444000765000024 16412201220566 22063 0ustar00kclarkstaff000000000000George,General,190293,0,64 Dwight,General,908348,0,75 Attila,Hun,,0,56 Tojo,Emporor,,0,87 Tommy,General,998110,1,54 Text-RecordParser-v1.6.3/t/data/people.dat000444000765000024 22712201220566 20223 0ustar00kclarkstaff000000000000name,rank,serial_no,is_living,age George,General,190293,0,64 Dwight,General,908348,0,75 Attila,Hun,,0,56 Tojo,Emporor,,0,87 Tommy,General,998110,1,54 Text-RecordParser-v1.6.3/t/data/pipe.dat000444000765000024 64312201220566 17676 0ustar00kclarkstaff000000000000MSH|^~\&|BOSS|01|PC|00|20020715121500||ADT^A02|BOSS20020715815758|P|2.2 
EVN|A02|20020715095900||03 PID|0||9354144|2053549|Nachname^iVorname^^^^""|Geburtsname|19470121|M|||Strasse 10^^Wohnort^^27383^""|||||M|L||||||Hamburg|||D|Techniker PV1|0|S|MKG 18^1823|E||MKG 18^ |11^Dr. med. Dr. dent. Christian Schippers|11^Dr. med. Dr. dent. Christian Schippers||||||| | |||2053549|GO96^01|Q|||S||||||||||||||||||||200207150959 Text-RecordParser-v1.6.3/t/data/simpsons.alt000444000765000024 30312201220566 20615 0ustar00kclarkstaff000000000000Name Address City State Wife Children // Simpson, Homer 747 Evergreen Terrace Springfield Marge Bart,Lisa,Maggie // Flander, Ned 748 Evergreen Terrace Springfield Maude (deceased) Rod,Todd // Text-RecordParser-v1.6.3/t/data/simpsons.csv000444000765000024 33312201220566 20633 0ustar00kclarkstaff000000000000Name,Address,City,State,Wife,Dependents "Simpson, Homer",747 Evergreen Terrace,Springfield,,Marge,"Bart,Lisa,Maggie,Santa's Little Helper" "Flanders, Ned",748 Evergreen Terrace,Springfield,,Maude (deceased),"Rod,Todd" Text-RecordParser-v1.6.3/t/data/simpsons.pdd000444000765000024 33312201220566 20607 0ustar00kclarkstaff000000000000Name|Address|City|State|Wife|Dependents "Simpson| Homer"|747 Evergreen Terrace|Springfield||Marge|"Bart|Lisa|Maggie|Santa's Little Helper" "Flanders| Ned"|748 Evergreen Terrace|Springfield||Maude (deceased)|"Rod|Todd" Text-RecordParser-v1.6.3/t/data/simpsons.ssv000444000765000024 26112201220566 20653 0ustar00kclarkstaff000000000000Name Address City State "Simpson, Homer" "747 Evergreen Terrace" Springfield MA "Flander, Ned" "748 Evergreen Terrace" Springfield MA Text-RecordParser-v1.6.3/t/data/simpsons.tab000444000765000024 34212201220566 20606 0ustar00kclarkstaff000000000000Name Address City State Wife Children Pets Simpson, Homer 747 Evergreen Terrace Springfield Marge Bart,Lisa,Maggie Snowball(s),Santa\'s Little Helper Flander, Ned 748 Evergreen Terrace Springfield Maude (deceased) Rod,Todd Text-RecordParser-v1.6.3/t/data/tabular.tab000444000765000024 7412201220566 20347 
0ustar00kclarkstaff000000000000data Jimmy Johnny Patty Ken 0 4 10 Casey 8 3 3 Corrie 1 0 2 Text-RecordParser-v1.6.3/t/data/trim.csv000444000765000024 10312201220566 17726 0ustar00kclarkstaff000000000000Name,Rank,SerialNumber Gene,Corporal,1656401 Ken,Discharged,