use strict;
use warnings;
use Test::More;
use File::Basename 'dirname';
use File::Spec::Functions 'catdir';
use Data::Dumper;
use KorAP::XML::Tokenizer;
use KorAP::XML::Krill;
use utf8;

# Fixture: sample corpus document shipped with the test suite.
my $path = catdir(dirname(__FILE__), 'TEST', 'BSP', 1);
| 12 | |
# Build a Krill document object from the fixture directory and
# tokenize it based on the Sgbr foundry's lemma layer.
ok(my $doc = KorAP::XML::Krill->new(path => $path . '/'), 'Create Document');

ok($doc->parse, 'Parse document');

ok(my $tokens = KorAP::XML::Tokenizer->new(
  doc     => $doc,
  path    => $doc->path,
  foundry => 'Sgbr',
  layer   => 'Lemma',
  name    => 'tokens'
), 'Create tokens based on lemmata');

ok($tokens->parse, 'Parse tokenization based on lemmata');
| 28 | |
ok($tokens->add('Sgbr', 'Lemma'), 'Add Structure');

my $data   = $tokens->to_data->{data};
my $stream = $data->{stream};

# First stream entry: carries the total token count ('-:tokens')
# alongside the first token's position and terms.
my $tok = $stream->[0];
is($tok->[0], '-:tokens$<i>51', 'Token number');
is($tok->[1], '_0$<i>0<i>18', 'Position');
is($tok->[2], 'i:sommerüberraschung', 'First term');
is($tok->[3], 's:Sommerüberraschung', 'First term');
is($tok->[4], 'sgbr/l:Sommerüberraschung', 'First term');
ok(!defined $tok->[5], 'First term');

# Second token: includes lemma variants (sgbr/lv).
$tok = $stream->[1];
is($tok->[0], '_1$<i>19<i>21', 'Position');
is($tok->[1], 'i:es', 'Second term');
is($tok->[2], 's:Es', 'Second term');
is($tok->[3], 'sgbr/l:es', 'Second term');
is($tok->[4], 'sgbr/lv:er', 'Second term');
is($tok->[5], 'sgbr/lv:sie', 'Second term');

# 16th token: surface form differs from the lemma (umlaut handling).
$tok = $stream->[16];
is($tok->[0], '_16$<i>107<i>115', 'Position');
is($tok->[1], 'i:guenther', '16th term');
is($tok->[2], 's:Guenther', '16th term');
is($tok->[3], 'sgbr/l:Günther', '16th term');
is($tok->[4], 'sgbr/lv:Günter', '16th term');

# Final token of the stream (index 50).
$tok = $stream->[-1];
is($tok->[0], '_50$<i>359<i>364', 'Position');
is($tok->[1], 'i:kevin', 'Last term');
is($tok->[2], 's:Kevin', 'Last term');
is($tok->[3], 'sgbr/l:Kevin', 'Last term');
| 58 | |
# Real data 1: a document whose Sgbr annotation provides no usable
# token data — adding the layer is expected to fail.
$path = catdir(dirname(__FILE__), 'CMC-TSK', '2014-09', '2843');

ok($doc = KorAP::XML::Krill->new(path => $path . '/'), 'Create Document');

ok($doc->parse, 'Parse document');

ok($tokens = KorAP::XML::Tokenizer->new(
  doc     => $doc,
  path    => $doc->path,
  foundry => 'Sgbr',
  layer   => 'Lemma',
  name    => 'tokens'
), 'Create tokens based on lemmata');

ok($tokens->parse, 'Parse tokenization based on lemmata');

ok(!$tokens->add('Sgbr', 'Lemma'), 'Add Structure impossible - no token data');
| 80 | |
# Real data 2: a document with usable lemma annotation — adding the
# layer must succeed.
$path = catdir(dirname(__FILE__), 'CMC-TSK', '2014-09', '3401');

ok($doc = KorAP::XML::Krill->new(path => $path . '/'), 'Create Document');

ok($doc->parse, 'Parse document');

ok($tokens = KorAP::XML::Tokenizer->new(
  doc     => $doc,
  path    => $doc->path,
  foundry => 'Sgbr',
  layer   => 'Lemma',
  name    => 'tokens'
), 'Create tokens based on lemmata');

ok($tokens->parse, 'Parse tokenization based on lemmata');

ok($tokens->add('Sgbr', 'Lemma'), 'Add Structure');

done_testing;

__END__