use strict;
use warnings;
use Test::More;
use Data::Dumper;
use JSON::XS;

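# Optionally skip all tests against the real corpus data (set SKIP_REAL=1)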
if ($ENV{SKIP_REAL}) {
  plan skip_all => 'Skip real tests';
};

use Benchmark qw/:hireswallclock/;

my $t = Benchmark->new;

use utf8;
use lib 'lib', '../lib';

use File::Basename 'dirname';
use File::Spec::Functions 'catdir';

use_ok('KorAP::XML::Krill');

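# Path to the BZK (Bonner Zeitungskorpus) test document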
my $path = catdir(dirname(__FILE__), '../corpus/BZK/D59/00089');

ok(my $doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load KorAP::XML::Krill');
ok($doc->parse, 'Parse document');

is($doc->text_sigle, 'BZK/D59/00089', 'Correct text sigle');
is($doc->doc_sigle, 'BZK/D59', 'Correct document sigle');
is($doc->corpus_sigle, 'BZK', 'Correct corpus sigle');

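# Metadata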
my $meta = $doc->meta;
is($meta->{T_title}, 'Saragat-Partei zerfällt', 'Title');
ok(!$meta->{T_sub_title}, 'No SubTitle');
ok(!$meta->{T_author}, 'Author');
ok(!$meta->{A_editor}, 'Editor');
is($meta->{S_pub_place}, 'Berlin', 'PubPlace');
is($meta->{D_pub_date}, '19590219', 'PubDate');
ok(!$meta->{A_publisher}, 'Publisher');

is($meta->{S_text_type}, 'Zeitung: Tageszeitung', 'Correct Text Type');

ok(!$meta->{S_text_type_art}, 'Correct Text Type Art');
is($meta->{S_text_type_ref}, 'Tageszeitung', 'Correct Text Type Ref');
is($meta->{S_text_domain}, 'Politik', 'Correct Text Domain');
is($meta->{S_text_column}, 'POLITIK', 'Correct Text Column');
is($meta->{K_text_class}->[0], 'politik', 'Correct Text Class');
is($meta->{K_text_class}->[1], 'ausland', 'Correct Text Class');
ok(!$meta->{K_text_class}->[2], 'Correct Text Class');


is($meta->{D_creation_date}, '19590219', 'Creation date');
is($meta->{S_availability}, 'ACA-NC-LC', 'License');
ok(!$meta->{pages}, 'Pages');

ok(!$meta->{A_file_edition_statement}, 'File Statement');
ok(!$meta->{A_bibl_edition_statement}, 'Bibl Statement');

is($meta->{A_reference} . "\n", <<'REF', 'Reference');
Neues Deutschland, [Tageszeitung], 19.02.1959, Jg. 14, Berliner Ausgabe, S. 7. - Sachgebiet: Politik, Originalressort: POLITIK; Saragat-Partei zerfällt
REF
is($meta->{S_language}, 'de', 'Language');

is($meta->{T_corpus_title}, 'Bonner Zeitungskorpus', 'Correct Corpus title');
ok(!$meta->{T_corpus_sub_title}, 'Correct Corpus sub title');
ok(!$meta->{T_corpus_author}, 'Correct Corpus author');
ok(!$meta->{A_corpus_editor}, 'Correct Corpus editor');

is($meta->{T_doc_title}, 'Neues Deutschland', 'Correct Doc title');
is($meta->{T_doc_sub_title}, 'Organ des Zentralkomitees der Sozialistischen Einheitspartei Deutschlands', 'Correct Doc sub title');
ok(!$meta->{T_doc_author}, 'Correct Doc author');
ok(!$meta->{A_doc_editor}, 'Correct doc editor');

# Tokenization
use_ok('KorAP::XML::Tokenizer');

my ($token_base_foundry, $token_base_layer) = (qw/OpenNLP Tokens/);

# Get tokenization
my $tokens = KorAP::XML::Tokenizer->new(
  path => $doc->path,
  doc => $doc,
  foundry => $token_base_foundry,
  layer => $token_base_layer,
  name => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

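# Serialize the token object to Krill JSON and check the resulting fields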
my $output = decode_json( $tokens->to_json );

is(substr($output->{data}->{text}, 0, 100), 'Saragat-Partei zerfällt Rom (ADN) die von dem Rechtssozialisten Saragat geführte Sozialdemokratische', 'Primary Data');
is($output->{data}->{name}, 'tokens', 'tokenName');
is($output->{data}->{tokenSource}, 'opennlp#tokens', 'tokenSource');
is($output->{version}, '0.03', 'version');
is($output->{data}->{foundries}, '', 'Foundries');
is($output->{data}->{layerInfos}, '', 'layerInfos');
is($output->{data}->{stream}->[0]->[4], 's:Saragat-Partei', 'data');

is($output->{textSigle}, 'BZK/D59/00089', 'Correct text sigle');
is($output->{docSigle}, 'BZK/D59', 'Correct document sigle');
is($output->{corpusSigle}, 'BZK', 'Correct corpus sigle');

is($output->{title}, 'Saragat-Partei zerfällt', 'Title');
ok(!exists $output->{subTitle}, 'No SubTitle');
ok(!exists $output->{author}, 'Author');
ok(!exists $output->{editor}, 'Editor');

is($output->{pubPlace}, 'Berlin', 'PubPlace');
ok(!exists $output->{publisher}, 'Publisher');

is($output->{textType}, 'Zeitung: Tageszeitung', 'Correct Text Type');
ok(!exists $output->{textTypeArt}, 'Correct Text Type Art');
is($output->{textTypeRef}, 'Tageszeitung', 'Correct Text Type Ref');
is($output->{textDomain}, 'Politik', 'Correct Text Domain');
is($output->{textClass}, 'politik ausland', 'Correct Text Class');

is($output->{creationDate}, '19590219', 'Creation date');
is($output->{availability}, 'ACA-NC-LC', 'License');
ok(!exists $output->{pages}, 'Pages');
ok(!exists $output->{fileEditionStatement}, 'File Statement');
ok(!exists $output->{biblEditionStatement}, 'Bibl Statement');

is($output->{reference} . "\n", <<'REF', 'Reference');
Neues Deutschland, [Tageszeitung], 19.02.1959, Jg. 14, Berliner Ausgabe, S. 7. - Sachgebiet: Politik, Originalressort: POLITIK; Saragat-Partei zerfällt
REF
is($output->{language}, 'de', 'Language');

is($output->{corpusTitle}, 'Bonner Zeitungskorpus', 'Correct Corpus title');
ok(!exists $output->{corpusSubTitle}, 'Correct Corpus sub title');
ok(!exists $output->{corpusAuthor}, 'Correct Corpus author');
ok(!exists $output->{corpusEditor}, 'Correct Corpus editor');

is($output->{docTitle}, 'Neues Deutschland', 'Correct Doc title');
is($output->{docSubTitle}, 'Organ des Zentralkomitees der Sozialistischen Einheitspartei Deutschlands', 'Correct Doc sub title');
ok(!exists $output->{docAuthor}, 'Correct Doc author');
ok(!exists $output->{docEditor}, 'Correct doc editor');

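# Add annotation layers from the various foundries and check that
# foundries and layerInfos are extended accordingly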
## Base
$tokens->add('Base', 'Sentences');
$tokens->add('Base', 'Paragraphs');

$output = decode_json( $tokens->to_json );

is($output->{data}->{foundries}, 'base base/paragraphs base/sentences', 'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans', 'layerInfos');
my $first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr/s:Saragat-Partei/, 'data');
like($first_token, qr/_0\$<i>0<i>14/, 'data');

## OpenNLP
$tokens->add('OpenNLP', 'Sentences');

$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/s=spans', 'layerInfos');

$tokens->add('OpenNLP', 'Morpho');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans', 'layerInfos');

## Treetagger
$tokens->add('TreeTagger', 'Sentences');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans tt/s=spans', 'layerInfos');

$tokens->add('TreeTagger', 'Morpho');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');

## CoreNLP
$tokens->add('CoreNLP', 'NamedEntities');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences corenlp corenlp/namedentities opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans corenlp/ne=tokens opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');

$tokens->add('CoreNLP', 'Sentences');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences corenlp corenlp/namedentities corenlp/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans corenlp/ne=tokens corenlp/s=spans opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');

$tokens->add('CoreNLP', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!corenlp/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!corenlp/p=tokens!, 'layerInfos');

$tokens->add('CoreNLP', 'Constituency');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!corenlp/constituency!, 'Foundries');
like($output->{data}->{layerInfos}, qr!corenlp/c=spans!, 'layerInfos');

## Glemm
$tokens->add('Glemm', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!glemm/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!glemm/l=tokens!, 'layerInfos');

## Connexor
$tokens->add('Connexor', 'Sentences');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/sentences!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/s=spans!, 'layerInfos');

$tokens->add('Connexor', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/p=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!cnx/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!cnx/m=tokens!, 'layerInfos');

$tokens->add('Connexor', 'Phrase');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/phrase!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/c=spans!, 'layerInfos');

$tokens->add('Connexor', 'Syntax');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/syntax!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/syn=tokens!, 'layerInfos');

## Mate
$tokens->add('Mate', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!mate/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!mate/p=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!mate/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!mate/m=tokens!, 'layerInfos');

# diag "No test for mate dependency";

## XIP
$tokens->add('XIP', 'Sentences');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/sentences!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/s=spans!, 'layerInfos');

$tokens->add('XIP', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!xip/p=tokens!, 'layerInfos');

$tokens->add('XIP', 'Constituency');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/constituency!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/c=spans!, 'layerInfos');

# diag "No test for xip dependency";

done_testing;
__END__