use strict;
use warnings;
use Test::More;
use File::Basename 'dirname';
use File::Spec::Functions 'catdir';
use Data::Dumper;    # NOTE(review): unused here — retained only for ad-hoc debugging of token streams
use KorAP::XML::Tokenizer;
use KorAP::XML::Krill;
use utf8;

# Integration test: tokenize a real CMC-TSK sample document based on the
# Sgbr lemma layer and verify the serialized Krill index stream.

# Real-corpus tests require the sample data to be present;
# allow skipping them wholesale via the environment.
if ($ENV{SKIP_REAL}) {
  plan skip_all => 'Skip real tests';
};

# Path to the CMC-TSK sample document (corpus 2014-09, text sigle 3401).
my $path = catdir(dirname(__FILE__), 'CMC-TSK', '2014-09', 3401);

# Trailing slash required by KorAP::XML::Krill's path handling.
ok(my $doc = KorAP::XML::Krill->new(
  path => $path . '/'
), 'Create Document');

ok($doc->parse('Sgbr'), 'Parse document');

# Build the token stream from the Sgbr foundry's lemma annotation layer.
ok(my $tokens = KorAP::XML::Tokenizer->new(
  path => $doc->path,
  doc => $doc,
  foundry => 'Sgbr',
  layer => 'Lemma',
  name => 'tokens'
), 'Create tokens based on lemmata');

ok($tokens->parse, 'Parse tokenization based on lemmata');

ok($tokens->add('Base', 'Sentences'), 'Add Sentences');

my $stream = $tokens->to_data->{data}->{stream};

# Inspect the serialized index stream attached to the first token:
# aggregate counts, the text span, the sentence span, and the first
# token's character offsets.
is($stream->[0]->[0], '-:base/sentences$<i>1', 'Sentence count payload');
is($stream->[0]->[1], '-:tokens$<i>15', 'Token count payload');
is($stream->[0]->[2], '<>:base/s:t$<b>64<i>0<i>115<i>15<b>0', 'Text span payload');
is($stream->[0]->[3], '<>:base/s:s$<b>64<i>16<i>114<i>15<b>2', 'Sentence span payload');
is($stream->[0]->[4], '_0$<i>17<i>18', 'First token character offsets');

done_testing;