use strict;
use warnings;
use utf8;

# Make the distribution modules findable when run from t/ or the repo root.
use lib 'lib', '../lib';

use Test::More;
use Data::Dumper;
use JSON::XS;
use File::Basename 'dirname';
use File::Spec::Functions 'catdir';

# Allow CI to bypass tests that require the real corpus data on disk.
if ($ENV{SKIP_REAL}) {
  plan skip_all => 'Skip real tests';
};

use Benchmark qw/:hireswallclock/;

# Wallclock benchmark handle; kept for ad-hoc timing while developing.
my $t = Benchmark->new;

# Preliminary checks for HNC (Hungarian National Corpus) input files.
use_ok('KorAP::XML::Krill');
# Load and parse the single test text HNC/DOC00001/00001 from the
# fixture corpus, then verify its sigles and header metadata.
my $path = catdir(dirname(__FILE__), 'corpus','HNC','DOC00001','00001');

# Trailing slash required: KorAP::XML::Krill expects a directory path.
ok(my $doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load Korap::Document');
ok($doc->parse, 'Parse document');

# Sigles are derived from the corpus/document/text directory levels.
is($doc->text_sigle, 'HNC/DOC00001/00001', 'Correct text sigle');
is($doc->doc_sigle, 'HNC/DOC00001', 'Correct document sigle');
is($doc->corpus_sigle, 'HNC', 'Correct corpus sigle');

my $meta = $doc->meta;
is($meta->{T_title}, 'GNU Free Documentation License', 'Title');
# NOTE(review): 'H_PUBPLACE' (and 'H_PUBLISHER' below) look like
# placeholder values in the preliminary fixture data — confirm.
is($meta->{S_pub_place}, 'H_PUBPLACE', 'PubPlace');

# Defined on document level as
# idsHeader > fileDesc > publicationStmt > pubDate == 2005/08/16
# idsHeader > fileDesc > biblFull > publicationStmt > pubDate == 2003/07/08-2014/05/03
# idsHeader > fileDesc > biblFull > publicationStmt > sourceDesc > biblStruct > monogr > imprint > pubDate == 2003/07/08-2014/05/03
# Publication date check deliberately disabled (ambiguous source values above):
# is($meta->{D_pub_date}, '20030708', 'Publication date');
ok(!$meta->{T_sub_title}, 'SubTitle');
is($meta->{T_author}, 'Addbot', 'Author');

is($meta->{A_publisher}, 'H_PUBLISHER', 'Publisher');
ok(!$meta->{A_editor}, 'Editor');
ok(!$meta->{translator}, 'Translator');

# Text-level classification fields are expected to be absent here.
ok(!$meta->{S_text_type}, 'Correct Text Type');
ok(!$meta->{S_text_type_art}, 'Correct Text Type Art');
ok(!$meta->{S_text_type_ref}, 'Correct Text Type Ref');
ok(!$meta->{S_text_column}, 'Correct Text Column');
ok(!$meta->{S_text_domain}, 'Correct Text Domain');
is($meta->{D_creation_date}, '20130302', 'Creation Date');

ok(!$meta->{pages}, 'Pages');
is($meta->{A_file_edition_statement}, 'Magyar Nemzeti Szövegtár 2. változat - XML formátum', 'File Ed Statement');
ok(!$meta->{A_bibl_edition_statement}, 'Bibl Ed Statement');
ok(!$meta->{A_reference}, 'Reference');
is($meta->{S_language}, 'hu', 'Language');

is($meta->{S_availability}, 'Kutatási célokra, megállapodás alapján, hozzáférhető', 'Availability');
is($meta->{A_distributor}, 'MTA Nyelvtudományi Intézet', 'Distributor');

# Corpus-level metadata is not set in this fixture.
ok(!$meta->{T_corpus_title}, 'Correct Corpus title');
ok(!$meta->{T_corpus_sub_title}, 'Correct Corpus Sub title');
ok(!$meta->{T_corpus_author}, 'Correct Corpus author');
ok(!$meta->{A_corpus_editor}, 'Correct Corpus editor');

# Document-level metadata: only the title is present.
is($meta->{T_doc_title}, 'MNSZ hivatalos korpusz: Wikipédia cikkek', 'Correct Doc title');
ok(!$meta->{T_doc_sub_title}, 'Correct Doc Sub title');
ok(!$meta->{T_doc_author}, 'Correct Doc author');
ok(!$meta->{A_doc_editor}, 'Correct Doc editor');
# Tokenization: build the token stream from the HNC morphology layer.
use_ok('KorAP::XML::Tokenizer');

my ($base_foundry, $base_layer) = ('HNC', 'Morpho');

# Construct the tokenizer for the parsed document.
my $tokens = KorAP::XML::Tokenizer->new(
  path    => $doc->path,
  doc     => $doc,
  foundry => $base_foundry,
  layer   => $base_layer,
  name    => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

# Round-trip through JSON to inspect the serialized annotation stream.
my $output = decode_json( $tokens->to_json );

is($output->{data}{stream}[0][1], '<>:base/s:t$<b>64<i>0<i>4368<i>578<b>0', 't');
is($output->{data}{stream}[0][3], 'i:addbot', 't');
is($output->{data}{stream}[-1][0], '_577$<i>4359<i>4368', 't');


## Base: add structural and morphological annotation layers.
ok($tokens->add('DeReKo', 'Structure', 'base_sentences_paragraphs'), 'DeReKo');
ok($tokens->add('HNC', 'Morpho'), 'Add HNC Morphology');

$output = $tokens->to_data;

is($output->{data}{foundries}, 'dereko dereko/structure dereko/structure/base_sentences_paragraphs hnc hnc/morpho', 'Foundries');

is($output->{data}{layerInfos}, 'dereko/s=spans hnc/l=tokens hnc/m=tokens hnc/p=tokens', 'layerInfos');

# Token 8 ("Free"): check every annotation on the joined bundle.
my $token = join('||', @{$output->{data}{stream}[7]});

for my $expected (
  qr!hnc/l:free!,
  qr!hnc/m:compound:n!,
  qr!hnc/m:hyphenated:n!,
  qr!hnc/m:mboundary:free!,
  qr!hnc/m:morphemes:ZERO::NOM!,
  qr!hnc/m:stem:free::FN!,
  qr!hnc/p:FN\.NOM!,
  qr!i:free!,
  qr!s:Free!
) {
  like($token, $expected, 'data');
};

# Token 31 ("tervezett"): verb form with participle morphology.
$token = join('||', @{$output->{data}{stream}[30]});

for my $expected (
  qr!hnc/l:tervez!,
  qr!hnc/m:compound:n!,
  qr!hnc/m:hyphenated:n!,
  qr!hnc/m:mboundary:tervez\+ett!,
  qr!hnc/m:morphemes:ett::_MIB ZERO::NOM!,
  qr!hnc/m:stem:tervez::IGE!,
  qr!hnc/p:IGE\._MIB\.NOM!,
  qr!i:tervezett!,
  qr!s:tervezett!
) {
  like($token, $expected, 'data');
};

done_testing;
__END__