use strict;
use warnings;
use Test::More;
use Data::Dumper;
use JSON::XS;

use Benchmark qw/:hireswallclock/;

my $t = Benchmark->new;

use utf8;
use lib 'lib', '../lib';

use File::Basename 'dirname';
use File::Spec::Functions 'catdir';

use_ok('KorAP::XML::Krill');

# This will check LWC annotations

# New

my $path = catdir(dirname(__FILE__), '../corpus/WPD17/000/22053');

ok(my $doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load Korap::Document');
ok($doc->parse, 'Parse document');

is($doc->text_sigle, 'WPD17/000/22053', 'Correct text sigle');
is($doc->doc_sigle, 'WPD17/000', 'Correct document sigle');
is($doc->corpus_sigle, 'WPD17', 'Correct corpus sigle');

my $meta = $doc->meta;
is($meta->{title}, '0er', 'Title');
is($meta->{pub_place}, 'URL:http://de.wikipedia.org', 'PubPlace');
is($meta->{pub_date}, '20170701', 'Publication Date');
ok(!$meta->{sub_title}, 'SubTitle');
is($meta->{author}, 'Rogi.Official, u.a.', 'Author');

is($meta->{publisher}, 'Wikipedia', 'Publisher');
is($meta->{editor}, 'wikipedia.org', 'Editor');
ok(!$meta->{translator}, 'Translator');
is($meta->{text_type}, 'Enzyklopädie', 'Correct Text Type');
is($meta->{text_type_art}, 'Enzyklopädie-Artikel', 'Correct Text Type Art');
ok(!$meta->{text_type_ref}, 'Correct Text Type Ref');
ok(!$meta->{text_column}, 'Correct Text Column');
ok(!$meta->{text_domain}, 'Correct Text Domain');
is($meta->{creation_date}, '20150511', 'Creation Date');

ok(!$meta->{pages}, 'Pages');
ok(!$meta->{file_edition_statement}, 'File Ed Statement');
ok(!$meta->{bibl_edition_statement}, 'Bibl Ed Statement');
is($meta->{reference}, '0er, In: Wikipedia - URL:http://de.wikipedia.org/wiki/0er: Wikipedia, 2017', 'Reference');
is($meta->{language}, 'de', 'Language');

is($meta->{corpus_title}, 'Wikipedia', 'Correct Corpus title');
ok(!$meta->{corpus_sub_title}, 'Correct Corpus Sub title');
ok(!$meta->{corpus_author}, 'Correct Corpus author');
is($meta->{corpus_editor}, 'wikipedia.org', 'Correct Corpus editor');

is($meta->{doc_title}, 'Wikipedia, Artikel mit Anfangszahl 0, Teil 00', 'Correct Doc title');
ok(!$meta->{doc_sub_title}, 'Correct Doc Sub title');
ok(!$meta->{doc_author}, 'Correct Doc author');
ok(!$meta->{doc_editor}, 'Correct Doc editor');

# Tokenization
use_ok('KorAP::XML::Tokenizer');

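# The base token stream is read from the 'Base' foundry's 'Tokens' layer;
# all further annotation layers are merged onto these tokens.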
my ($token_base_foundry, $token_base_layer) = (qw/Base Tokens/);

# Get tokenization
my $tokens = KorAP::XML::Tokenizer->new(
  path => $doc->path,
  doc => $doc,
  foundry => $token_base_foundry,
  layer => $token_base_layer,
  name => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

my $output = decode_json( $tokens->to_json );

## Base
$tokens->add('DeReKo', 'Structure', 'base_sentences_paragraphs');

# LWC
ok($tokens->add('LWC', 'Dependency'), 'Add LWC dependency annotations');

$output = $tokens->to_data;

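# Both added foundries should now be listed in the serialized data:
# 'dereko/structure' contributes sentence/paragraph spans (dereko/s=spans),
# 'lwc/dependency' contributes dependency relations (lwc/d=rels).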
is($output->{data}->{foundries},
  'dereko dereko/structure dereko/structure/base_sentences_paragraphs lwc lwc/dependency',
  'Foundries');

is($output->{data}->{layerInfos}, 'dereko/s=spans lwc/d=rels', 'layerInfos');

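# Each entry in the token stream is the list of annotation terms for one
# token; joining with '||' allows simple regex checks. Relation terms have
# the form >:lwc/d:LABEL followed by a payload (e.g. $<b>32<i>4) which is
# assumed here to encode the relation class and the target token/span positions.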
my $token = join('||', @{$output->{data}->{stream}->[7]});

like($token, qr!>:lwc/d:SVP\$<b>32<i>4!, 'data');
like($token, qr!i:statt!, 'data');

$token = join('||', @{$output->{data}->{stream}->[9]});

like($token, qr!>:lwc/d:--\$<b>33<i>64<i>76<i>8<i>11!, 'data');
like($token, qr!s:Januar!, 'data');


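# Second WPD17 document: check additional metadata (text classes)
# and the same LWC dependency annotations.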
$path = catdir(dirname(__FILE__), '../corpus/WPD17/060/18486');

ok($doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load Korap::Document');
ok($doc->parse, 'Parse document');

$meta = $doc->meta;

is($meta->{doc_title}, 'Wikipedia, Artikel mit Anfangszahl 0, Teil 60', 'Doc title');
ok(!exists $meta->{translator}, 'No translator');

is($meta->{text_class}->[0], 'staat-gesellschaft', 'text class');
is($meta->{text_class}->[1], 'verbrechen', 'text class');


# Get tokenization
$tokens = KorAP::XML::Tokenizer->new(
  path => $doc->path,
  doc => $doc,
  foundry => $token_base_foundry,
  layer => $token_base_layer,
  name => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

## Base
$tokens->add('DeReKo', 'Structure', 'base_sentences_paragraphs');

# LWC
ok($tokens->add('LWC', 'Dependency'), 'Add LWC dependency annotations');

$output = decode_json( $tokens->to_json );

$token = join('||', @{$output->{data}->{stream}->[2]});

like($token, qr!>:lwc/d:SVP\$<b>32<i>1!, 'data');
like($token, qr!s:für!, 'data');


done_testing;
__END__