use strict;
use warnings;
use Test::More;
use Data::Dumper;    # NOTE(review): not referenced below - ad-hoc debugging aid; candidate for removal
use JSON::XS;        # NOTE(review): not referenced below; candidate for removal

use Benchmark qw/:hireswallclock/;

# Wall-clock start marker.
# NOTE(review): $t is never read afterwards - dead timing scaffold; candidate for removal.
my $t = Benchmark->new;

use utf8;
use lib 'lib', '../lib';

use File::Basename 'dirname';
use File::Spec::Functions 'catdir';

# Load the main document class under test.
use_ok('KorAP::XML::Krill');
| 18 | |
# Path to the WPD test-corpus document, resolved relative to this test file.
my $path = catdir(dirname(__FILE__), '../corpus/WPD/00001');

# Construct and parse the primary test document.
ok(my $doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load Korap::Document');
ok($doc->parse, 'Parse document');

# Sigles identify the text, document and corpus levels respectively.
is($doc->text_sigle, 'WPD/AAA/00001', 'Correct text sigle');
is($doc->doc_sigle, 'WPD/AAA', 'Correct document sigle');
is($doc->corpus_sigle, 'WPD', 'Correct corpus sigle');

# Header metadata extracted during parsing.
my $meta = $doc->meta;
is($meta->{title}, 'A', 'Title');
is($meta->{pub_place}, 'URL:http://de.wikipedia.org', 'PubPlace');
is($meta->{pub_date}, '20050328', 'Creation Date');
# The sub-title check is skipped: the original corpus variant that
# exercised this field is no longer supported.
SKIP: {
  skip 'Failure because corpus is no longer supported', 1;
  ok(!$meta->{sub_title}, 'SubTitle');
};
is($meta->{author}, 'Ruru; Jens.Ol; Aglarech; u.a.', 'Author');

# Document-level metadata must be absent for this text.
ok(!$meta->{doc_title}, 'Correct Doc title');
ok(!$meta->{doc_sub_title}, 'Correct Doc Sub title');
ok(!$meta->{doc_author}, 'Correct Doc author');
ok(!$meta->{doc_editor}, 'Correct Doc editor');

# Corpus-level metadata must be absent as well.
ok(!$meta->{corpus_title}, 'Correct Corpus title');
ok(!$meta->{corpus_sub_title}, 'Correct Corpus Sub title');
# Tokenization
use_ok('KorAP::XML::Tokenizer');

# Base tokenization: the OpenNLP token stream.
my ($token_base_foundry, $token_base_layer) = ('OpenNLP', 'Tokens');

# Build the tokenizer for the parsed document; the resulting
# annotation field is exposed under the name 'tokens'.
my $tokens = KorAP::XML::Tokenizer->new(
  doc     => $doc,
  path    => $doc->path,
  name    => 'tokens',
  foundry => $token_base_foundry,
  layer   => $token_base_layer
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');
my $output = $tokens->to_data;

# Spot-check the serialized structure: primary text prefix, field name,
# token source, format version, and the (still empty) foundry/layer lists.
is(substr($output->{data}->{text}, 0, 100), 'A bzw. a ist der erste Buchstabe des lateinischen Alphabets und ein Vokal. Der Buchstabe A hat in de', 'Primary Data');
is($output->{data}->{name}, 'tokens', 'tokenName');
is($output->{data}->{tokenSource}, 'opennlp#tokens', 'tokenSource');

is($output->{version}, '0.03', 'version');
is($output->{data}->{foundries}, '', 'Foundries');
is($output->{data}->{layerInfos}, '', 'layerInfos');
is($output->{data}->{stream}->[0]->[4], 's:A', 'data');

# Add Mate dependency annotations and re-serialize.
$tokens->add('Mate', 'Dependency');

my $stream = $tokens->to_data->{data}->{stream};

# This is not a good relation example
is($stream->[77]->[0],
   '<:mate/d:--$<b>34<i>498<i>499<i>78<i>78',
   'element to term');
is($stream->[78]->[0], '>:mate/d:--$<b>33<i>498<i>499<i>77<i>78', 'term to element');

# Add base sentence boundaries and re-serialize.
$tokens->add('Base', 'Sentences');

$stream = $tokens->to_data->{data}->{stream};

is($stream->[0]->[2], '<>:base/s:s$<b>64<i>0<i>74<i>13<b>2', 'Text starts with sentence');
Akron | d3a0bad | 2016-02-26 14:07:58 +0100 | [diff] [blame] | 88 | |
# Regression check: a document that was once problematic to process.
$path = catdir(dirname(__FILE__), '../corpus/WPD15/W28/65631');
ok($doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load Korap::Document');
ok($doc->parse, 'Parse document');

is($doc->text_sigle, 'WPD15/W28/65631', 'Correct text sigle');
is($doc->doc_sigle, 'WPD15/W28', 'Correct document sigle');
is($doc->corpus_sigle, 'WPD15', 'Correct corpus sigle');

# Get tokenization - this time based on the aggregated base tokenization.
$tokens = KorAP::XML::Tokenizer->new(
  path => $doc->path,
  doc => $doc,
  foundry => 'Base',
  layer => 'tokens_aggr',
  name => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

is($tokens->foundry, 'Base', 'Foundry');
is($tokens->layer, 'tokens_aggr', 'Layer');

# CoreNLP constituency spans must be addable to this document.
ok($tokens->add('CoreNLP', 'Constituency'), 'Add Structure');

$output = $tokens->to_data;

# Foundry/layer bookkeeping and editor metadata after annotation.
is($output->{data}->{foundries}, 'corenlp corenlp/constituency', 'Foundries');
is($output->{data}->{layerInfos}, 'corenlp/c=spans', 'layerInfos');
is($doc->meta->{editor}, 'wikipedia.org', 'Editor');
Akron | 6f9fef5 | 2016-11-03 17:06:40 +0100 | [diff] [blame] | 120 | |
# No fixed plan: the number of tests is whatever ran above.
done_testing;
__END__