use strict;
use warnings;
use Test::More;
use Data::Dumper;    # NOTE(review): not used by any assertion; kept for ad-hoc debugging
use JSON::XS;

# Integration test against the real WPD17 test corpus — allow opt-out
# for environments that only run unit tests.
if ($ENV{SKIP_REAL}) {
  plan skip_all => 'Skip real tests';
};

# NOTE(review): removed the dead `my $t = Benchmark->new;` — $t was never
# read anywhere in this file. The import is kept in case timing is
# reintroduced during development.
use Benchmark qw/:hireswallclock/;

use utf8;
use lib 'lib', '../lib';

use File::Basename 'dirname';
use File::Spec::Functions 'catdir';

use_ok('KorAP::XML::Krill');
| 22 | |
# Check metadata and LWC annotations for corpus text WPD17/000/22053

my $path = catdir(dirname(__FILE__), '../corpus/WPD17/000/22053');

ok(my $doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load Korap::Document');
ok($doc->parse, 'Parse document');

# Sigles are derived from the corpus directory layout
is($doc->text_sigle, 'WPD17/000/22053', 'Correct text sigle');
is($doc->doc_sigle, 'WPD17/000', 'Correct document sigle');
is($doc->corpus_sigle, 'WPD17', 'Correct corpus sigle');

# Text level metadata
my $meta = $doc->meta;
is($meta->{T_title}, '0er', 'Title');
is($meta->{S_pub_place}, 'URL:http://de.wikipedia.org', 'PubPlace');
# Fixed copy-pasted label: D_pub_date is the publication date; the
# creation date (D_creation_date) is asserted separately below.
is($meta->{D_pub_date}, '20170701', 'Publication Date');
ok(!$meta->{T_sub_title}, 'SubTitle');
is($meta->{T_author}, 'Rogi.Official, u.a.', 'Author');

is($meta->{A_publisher}, 'Wikipedia', 'Publisher');
is($meta->{A_editor}, 'wikipedia.org', 'Editor');
# Use `exists` for consistency with the second document's translator check
ok(!exists $meta->{translator}, 'Translator');
is($meta->{S_text_type}, 'Enzyklopädie', 'Correct Text Type');
is($meta->{S_text_type_art}, 'Enzyklopädie-Artikel', 'Correct Text Type Art');
ok(!$meta->{S_text_type_ref}, 'Correct Text Type Ref');
ok(!$meta->{S_text_column}, 'Correct Text Column');
ok(!$meta->{S_text_domain}, 'Correct Text Domain');
is($meta->{D_creation_date}, '20150511', 'Creation Date');

ok(!$meta->{pages}, 'Pages');
ok(!$meta->{A_file_edition_statement}, 'File Ed Statement');
ok(!$meta->{A_bibl_edition_statement}, 'Bibl Ed Statement');
is($meta->{A_reference}, '0er, In: Wikipedia - URL:http://de.wikipedia.org/wiki/0er: Wikipedia, 2017', 'Reference');
is($meta->{S_language}, 'de', 'Language');

# Corpus level metadata
is($meta->{T_corpus_title}, 'Wikipedia', 'Correct Corpus title');
ok(!$meta->{T_corpus_sub_title}, 'Correct Corpus Sub title');
ok(!$meta->{T_corpus_author}, 'Correct Corpus author');
is($meta->{A_corpus_editor}, 'wikipedia.org', 'Correct Corpus editor');

# Document level metadata
is($meta->{T_doc_title}, 'Wikipedia, Artikel mit Anfangszahl 0, Teil 00', 'Correct Doc title');
ok(!$meta->{T_doc_sub_title}, 'Correct Doc Sub title');
ok(!$meta->{T_doc_author}, 'Correct Doc author');
ok(!$meta->{A_doc_editor}, 'Correct Doc editor');
Akron | 4c67919 | 2018-01-16 17:41:49 +0100 | [diff] [blame] | 68 | |
# Tokenization of the first document
use_ok('KorAP::XML::Tokenizer');

my ($token_base_foundry, $token_base_layer) = ('Base', 'Tokens');

# Build the tokenizer on top of the base tokenization layer
my $tokens = KorAP::XML::Tokenizer->new(
  path    => $doc->path,
  doc     => $doc,
  foundry => $token_base_foundry,
  layer   => $token_base_layer,
  name    => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

# Round-trip through JSON once so a broken serialization dies here;
# the decoded value itself is replaced by to_data below.
my $output = decode_json( $tokens->to_json );

## Base: sentence and paragraph structure spans
$tokens->add('DeReKo', 'Structure', 'base_sentences_paragraphs');

# LWC: dependency relation annotations
ok($tokens->add('LWC', 'Dependency'), 'Add LWC dependency annotations');

$output = $tokens->to_data;

# All added foundries must be announced in the output header
is($output->{data}->{foundries},
   'dereko dereko/structure dereko/structure/base_sentences_paragraphs lwc lwc/dependency',
   'Foundries');

is($output->{data}->{layerInfos}, 'dereko/s=spans lwc/d=rels', 'layerInfos');

# Token 7: expects an lwc/d:SVP relation in the annotation stream
my $token = join('||', @{$output->{data}->{stream}->[7]});

like($token, qr!>:lwc/d:SVP\$<b>32<i>4!, 'data');
like($token, qr!i:statt!, 'data');

# Token 9: expects an lwc/d:-- relation with span payload
$token = join('||', @{$output->{data}->{stream}->[9]});

like($token, qr!>:lwc/d:--\$<b>33<i>64<i>76<i>8<i>11!, 'data');
like($token, qr!s:Januar!, 'data');
| 110 | |
# Second document: WPD17/060/18486
$path = catdir(dirname(__FILE__), '../corpus/WPD17/060/18486');

ok($doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load Korap::Document');
ok($doc->parse, 'Parse document');

$meta = $doc->meta;

is($meta->{T_doc_title}, 'Wikipedia, Artikel mit Anfangszahl 0, Teil 60', 'No doc title');
ok(!exists $meta->{translator}, 'No translator');

# Topic classification keywords of this text
is($meta->{K_text_class}->[0], 'staat-gesellschaft', 'text class');
is($meta->{K_text_class}->[1], 'verbrechen', 'text class');
| 125 | |
# Tokenization of the second document
$tokens = KorAP::XML::Tokenizer->new(
  path    => $doc->path,
  doc     => $doc,
  foundry => $token_base_foundry,
  layer   => $token_base_layer,
  name    => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

## Base: sentence and paragraph structure spans
$tokens->add('DeReKo', 'Structure', 'base_sentences_paragraphs');

# LWC: dependency relation annotations
ok($tokens->add('LWC', 'Dependency'), 'Add LWC dependency annotations');

$output = decode_json( $tokens->to_json );

# Token 2: expects an lwc/d:SVP relation in the annotation stream
$token = join('||', @{$output->{data}->{stream}->[2]});

like($token, qr!>:lwc/d:SVP\$<b>32<i>1!, 'data');
like($token, qr!s:für!, 'data');


done_testing;
__END__