Added tests and minor metadata parsing adjustments for HNC

Change-Id: Ibc2e79895469cdc1175c3cd065f89d78ea6e9e12
diff --git a/t/real/hnc.t b/t/real/hnc.t
new file mode 100644
index 0000000..cde02d9
--- /dev/null
+++ b/t/real/hnc.t
@@ -0,0 +1,131 @@
+use strict;
+use warnings;
+use Test::More;
+use Data::Dumper;
+use JSON::XS;
+
+use Benchmark qw/:hireswallclock/;
+
+my $t = Benchmark->new;
+
+use utf8;
+use lib 'lib', '../lib';
+
+use File::Basename 'dirname';
+use File::Spec::Functions 'catdir';
+
+use_ok('KorAP::XML::Krill');
+
+# This will check preliminary HNC files
+
+# HNC/DOC00001/00001
+my $path = catdir(dirname(__FILE__), '../corpus/HNC/DOC00001/00001');
+
+ok(my $doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load KorAP::XML::Krill');
+ok($doc->parse, 'Parse document');
+
+is($doc->text_sigle, 'HNC/DOC00001/00001', 'Correct text sigle');
+is($doc->doc_sigle, 'HNC/DOC00001', 'Correct document sigle');
+is($doc->corpus_sigle, 'HNC', 'Correct corpus sigle');
+
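+# Check metadata fields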
+my $meta = $doc->meta;
+is($meta->{title}, 'GNU Free Documentation License', 'Title');
+is($meta->{pub_place}, 'H_PUBPLACE', 'PubPlace');
+
+# pubDate is defined on the document level with conflicting values:
+# idsHeader > fileDesc > publicationStmt > pubDate == 2005/08/16
+# idsHeader > fileDesc > biblFull > publicationStmt > pubDate == 2003/07/08-2014/05/03
+# idsHeader > fileDesc > biblFull > publicationStmt > sourceDesc > biblStruct > monogr > imprint > pubDate == 2003/07/08-2014/05/03
+# is($meta->{pub_date}, '20030708', 'Publication date');
+ok(!$meta->{sub_title}, 'SubTitle');
+is($meta->{author}, 'Addbot', 'Author');
+
+is($meta->{publisher}, 'H_PUBLISHER', 'Publisher');
+ok(!$meta->{editor}, 'Editor');
+ok(!$meta->{translator}, 'Translator');
+
+ok(!$meta->{text_type}, 'Correct Text Type');
+ok(!$meta->{text_type_art}, 'Correct Text Type Art');
+ok(!$meta->{text_type_ref}, 'Correct Text Type Ref');
+ok(!$meta->{text_column}, 'Correct Text Column');
+ok(!$meta->{text_domain}, 'Correct Text Domain');
+is($meta->{creation_date}, '20130302', 'Creation Date');
+
+ok(!$meta->{pages}, 'Pages');
+ok(!$meta->{file_edition_statement}, 'File Ed Statement');
+ok(!$meta->{bibl_edition_statement}, 'Bibl Ed Statement');
+ok(!$meta->{reference}, 'Reference');
+is($meta->{language}, 'hu', 'Language');
+
+ok(!$meta->{corpus_title}, 'Correct Corpus title');
+ok(!$meta->{corpus_sub_title}, 'Correct Corpus Sub title');
+ok(!$meta->{corpus_author}, 'Correct Corpus author');
+ok(!$meta->{corpus_editor}, 'Correct Corpus editor');
+
+is($meta->{doc_title}, 'MNSZ hivatalos korpusz: Wikipédia cikkek', 'Correct Doc title');
+ok(!$meta->{doc_sub_title}, 'Correct Doc Sub title');
+ok(!$meta->{doc_author}, 'Correct Doc author');
+ok(!$meta->{doc_editor}, 'Correct Doc editor');
+
+# Tokenization
+use_ok('KorAP::XML::Tokenizer');
+
+my ($token_base_foundry, $token_base_layer) = (qw/HNC Morpho/);
+
+# Get tokenization
+my $tokens = KorAP::XML::Tokenizer->new(
+  path => $doc->path,
+  doc => $doc,
+  foundry => $token_base_foundry,
+  layer => $token_base_layer,
+  name => 'tokens'
+);
+ok($tokens, 'Token Object is fine');
+ok($tokens->parse, 'Token parsing is fine');
+
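+# Serialise to JSON and inspect the raw token stream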
+my $output = decode_json( $tokens->to_json );
+
+is($output->{data}->{stream}->[0]->[1], '<>:base/s:t$<b>64<i>0<i>4368<i>577<b>0', 'Text span');
+is($output->{data}->{stream}->[0]->[3], 'i:addbot', 'First token (lowercase)');
+
+
+## Base
+ok($tokens->add('DeReKo', 'Structure', 'base_sentences_paragraphs'), 'DeReKo');
+ok($tokens->add('HNC', 'Morpho'), 'Add HNC Morphology');
+
+$output = $tokens->to_data;
+
+is($output->{data}->{foundries}, 'dereko dereko/structure dereko/structure/base_sentences_paragraphs hnc hnc/morpho', 'Foundries');
+
+is($output->{data}->{layerInfos}, 'dereko/s=spans hnc/l=tokens hnc/m=tokens hnc/p=tokens', 'layerInfos');
+
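+# Annotations of the eighth token ('Free')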
+my $token = join('||', @{$output->{data}->{stream}->[7]});
+
+like($token, qr!hnc/l:free!, 'data');
+like($token, qr!hnc/m:compound:n!, 'data');
+like($token, qr!hnc/m:hyphenated:n!, 'data');
+like($token, qr!hnc/m:mboundary:free!, 'data');
+like($token, qr!hnc/m:morphemes:ZERO::NOM!, 'data');
+like($token, qr!hnc/m:stem:free::FN!, 'data');
+like($token, qr!hnc/p:FN\.NOM!, 'data');
+like($token, qr!i:free!, 'data');
+like($token, qr!s:Free!, 'data');
+
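+# Annotations of the 31st token ('tervezett')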
+$token = join('||', @{$output->{data}->{stream}->[30]});
+
+like($token, qr!hnc/l:tervez!, 'data');
+like($token, qr!hnc/m:compound:n!, 'data');
+like($token, qr!hnc/m:hyphenated:n!, 'data');
+like($token, qr!hnc/m:mboundary:tervez\+ett!, 'data');
+like($token, qr!hnc/m:morphemes:ett::_MIB ZERO::NOM!, 'data');
+like($token, qr!hnc/m:stem:tervez::IGE!, 'data');
+like($token, qr!hnc/p:IGE\._MIB\.NOM!, 'data');
+like($token, qr!i:tervezett!, 'data');
+like($token, qr!s:tervezett!, 'data');
+
+done_testing;
+__END__