use strict;
use warnings;
use Test::More;
use Data::Dumper;
use JSON::XS;

use Benchmark qw/:hireswallclock/;

my $t = Benchmark->new;
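
# $t records the start time of the test run; the elapsed time is never
# reported below. If timing output is wanted, a minimal sketch using the
# timediff/timestr functions exported by Benchmark would be:
#
#   print timestr(timediff(Benchmark->new, $t)), "\n";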

use utf8;
use lib 'lib', '../lib';

use File::Basename 'dirname';
use File::Spec::Functions 'catdir';

use_ok('KorAP::XML::Krill');

# WDD/G27/38989
my $path = catdir(dirname(__FILE__), '../corpus/WDD/G27/38989');

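# Note: the directory path is passed with a trailing slash; the loader
# appears to concatenate file names (e.g. the header file) directly onto it.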
ok(my $doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load KorAP::XML::Krill document');
ok($doc->parse, 'Parse document');

is($doc->text_sigle, 'WDD11_G27.38989', 'Correct text sigle');
is($doc->doc_sigle, 'WDD11_G27', 'Correct document sigle');
is($doc->corpus_sigle, 'WDD11', 'Correct corpus sigle');

my $meta = $doc->meta;
is($meta->{title}, 'Diskussion:Gunter A. Pilz', 'Title');
ok(!$meta->{sub_title}, 'No SubTitle');
is($meta->{author}, '€pa, u.a.', 'Author');
ok(!$meta->{editor}, 'Editor');

is($meta->{pub_place}, 'URL:http://de.wikipedia.org', 'PubPlace');
is($meta->{publisher}, 'Wikipedia', 'Publisher');
is($meta->{text_type}, 'Diskussionen zu Enzyklopädie-Artikeln', 'Correct Text Type');
ok(!$meta->{text_type_art}, 'Correct Text Type Art');
ok(!$meta->{text_type_ref}, 'Correct Text Type Ref');
ok(!$meta->{text_domain}, 'Correct Text Domain');
is($meta->{creation_date}, '20070707', 'Creation date');
is($meta->{availability}, 'CC-BY-SA', 'License');
ok(!$meta->{pages}, 'Pages');
ok(!$meta->{file_edition_statement}, 'File Statement');
ok(!$meta->{bibl_edition_statement}, 'Bibl Statement');
is($meta->{reference} . "\n", <<'REF', 'Reference');
Diskussion:Gunter A. Pilz, In: Wikipedia - URL:http://de.wikipedia.org/wiki/Diskussion:Gunter_A._Pilz: Wikipedia, 2007
REF
is($meta->{language}, 'de', 'Language');

is($meta->{corpus_title}, 'Wikipedia', 'Correct Corpus title');
ok(!$meta->{corpus_sub_title}, 'Correct Corpus sub title');
ok(!$meta->{corpus_author}, 'Correct Corpus author');
is($meta->{corpus_editor}, 'wikipedia.org', 'Correct Corpus editor');

is($meta->{doc_title}, 'Wikipedia, Diskussionen zu Artikeln mit Anfangsbuchstabe G, Teil 27', 'Correct Doc title');
ok(!$meta->{doc_sub_title}, 'Correct Doc sub title');
ok(!$meta->{doc_author}, 'Correct Doc author');
ok(!$meta->{doc_editor}, 'Correct doc editor');

# Tokenization
use_ok('KorAP::XML::Tokenizer');

my ($token_base_foundry, $token_base_layer) = (qw/OpenNLP Tokens/);
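# OpenNLP supplies the base token stream (named 'tokens' below); all
# annotation layers added later are mapped onto these base tokens.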

# Get tokenization
my $tokens = KorAP::XML::Tokenizer->new(
  path => $doc->path,
  doc => $doc,
  foundry => $token_base_foundry,
  layer => $token_base_layer,
  name => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

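# Round-trip through JSON for inspection: to_json serializes the token
# object to the Krill index format, decode_json (JSON::XS) turns it back
# into a Perl structure for the checks below.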
my $output = decode_json( $tokens->to_json );

is(substr($output->{data}->{text}, 0, 49), '{{War Löschkandidat|6. Juli 2007|(erl., bleibt)}}', 'Primary Data');
is($output->{data}->{name}, 'tokens', 'tokenName');
is($output->{data}->{tokenSource}, 'opennlp#tokens', 'tokenSource');
is($output->{version}, '0.03', 'version');
is($output->{data}->{foundries}, '', 'Foundries');
is($output->{data}->{layerInfos}, '', 'layerInfos');
is($output->{data}->{stream}->[0]->[4], 's:{War', 'data');

is($output->{textSigle}, 'WDD11_G27.38989', 'Correct text sigle');
is($output->{docSigle}, 'WDD11_G27', 'Correct document sigle');
is($output->{corpusSigle}, 'WDD11', 'Correct corpus sigle');

is($output->{title}, 'Diskussion:Gunter A. Pilz', 'Title');
ok(!$output->{subTitle}, 'No SubTitle');
is($output->{author}, '€pa, u.a.', 'Author');
ok(!$output->{editor}, 'Editor');

is($output->{pubPlace}, 'URL:http://de.wikipedia.org', 'PubPlace');
is($output->{publisher}, 'Wikipedia', 'Publisher');
is($output->{textType}, 'Diskussionen zu Enzyklopädie-Artikeln', 'Correct Text Type');
ok(!$output->{textTypeArt}, 'Correct Text Type Art');
ok(!$output->{textTypeRef}, 'Correct Text Type Ref');
ok(!$output->{textDomain}, 'Correct Text Domain');
is($output->{creationDate}, '20070707', 'Creation date');
is($output->{availability}, 'CC-BY-SA', 'License');
ok(!$output->{pages}, 'Pages');
ok(!$output->{fileEditionStatement}, 'File Statement');
ok(!$output->{biblEditionStatement}, 'Bibl Statement');
is($output->{reference} . "\n", <<'REF', 'Reference');
Diskussion:Gunter A. Pilz, In: Wikipedia - URL:http://de.wikipedia.org/wiki/Diskussion:Gunter_A._Pilz: Wikipedia, 2007
REF
is($output->{language}, 'de', 'Language');

is($output->{corpusTitle}, 'Wikipedia', 'Correct Corpus title');
ok(!$output->{corpusSubTitle}, 'Correct Corpus sub title');
ok(!$output->{corpusAuthor}, 'Correct Corpus author');
is($output->{corpusEditor}, 'wikipedia.org', 'Correct Corpus editor');

is($output->{docTitle}, 'Wikipedia, Diskussionen zu Artikeln mit Anfangsbuchstabe G, Teil 27', 'Correct Doc title');
ok(!$output->{docSubTitle}, 'Correct Doc sub title');
ok(!$output->{docAuthor}, 'Correct Doc author');
ok(!$output->{docEditor}, 'Correct doc editor');

## Base
$tokens->add('Base', 'Sentences');

$tokens->add('Base', 'Paragraphs');

$output = decode_json( $tokens->to_json );

is($output->{data}->{foundries}, 'base base/paragraphs base/sentences', 'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans', 'layerInfos');
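# Each stream entry holds the terms of one token. A rough reading of the
# formats checked below: 's:...' carries the surface form, and
# '_0$<i>1<i>5' encodes token number 0 with character offsets 1 to 5.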
my $first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr/s:\{War/, 'data');
like($first_token, qr/_0\$<i>1<i>5/, 'data');


## OpenNLP
$tokens->add('OpenNLP', 'Sentences');

$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/s=spans', 'layerInfos');


$tokens->add('OpenNLP', 'Morpho');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans', 'layerInfos');


## Treetagger
$tokens->add('TreeTagger', 'Sentences');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans tt/s=spans', 'layerInfos');

$tokens->add('TreeTagger', 'Morpho');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');

## CoreNLP
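# No CoreNLP annotations exist for this text, so each add() call below
# would emit a warning; the warnings are silenced by locally overriding
# $SIG{__WARN__}, and the subsequent checks confirm that no corenlp
# foundry or layer was added.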
{
  local $SIG{__WARN__} = sub {};
  $tokens->add('CoreNLP', 'NamedEntities');
};
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');


{
  local $SIG{__WARN__} = sub {};
  $tokens->add('CoreNLP', 'Sentences');
};
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');

{
  local $SIG{__WARN__} = sub {};
  $tokens->add('CoreNLP', 'Morpho');
};
$output = decode_json( $tokens->to_json );
unlike($output->{data}->{foundries}, qr!corenlp/morpho!, 'Foundries');
unlike($output->{data}->{layerInfos}, qr!corenlp/p=tokens!, 'layerInfos');

{
  local $SIG{__WARN__} = sub {};
  $tokens->add('CoreNLP', 'Constituency');
};
$output = decode_json( $tokens->to_json );
unlike($output->{data}->{foundries}, qr!corenlp/constituency!, 'Foundries');
unlike($output->{data}->{layerInfos}, qr!corenlp/c=spans!, 'layerInfos');

## Glemm
{
  local $SIG{__WARN__} = sub {};
  $tokens->add('Glemm', 'Morpho');
};
$output = decode_json( $tokens->to_json );
unlike($output->{data}->{foundries}, qr!glemm/morpho!, 'Foundries');
unlike($output->{data}->{layerInfos}, qr!glemm/l=tokens!, 'layerInfos');

## Connexor
$tokens->add('Connexor', 'Sentences');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/sentences!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/s=spans!, 'layerInfos');

$tokens->add('Connexor', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/p=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!cnx/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!cnx/m=tokens!, 'layerInfos');

$tokens->add('Connexor', 'Phrase');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/phrase!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/c=spans!, 'layerInfos');

$tokens->add('Connexor', 'Syntax');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/syntax!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/syn=tokens!, 'layerInfos');

## Mate
$tokens->add('Mate', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!mate/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!mate/p=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!mate/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!mate/m=tokens!, 'layerInfos');

# diag "No test for mate dependency";

## XIP
$tokens->add('XIP', 'Sentences');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/sentences!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/s=spans!, 'layerInfos');

$tokens->add('XIP', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!xip/p=tokens!, 'layerInfos');

$tokens->add('XIP', 'Constituency');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/constituency!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/c=spans!, 'layerInfos');

# diag "No test for xip dependency";

done_testing;
__END__