use strict;
use warnings;
use Test::More;
use Data::Dumper;
use JSON::XS;

use Benchmark qw/:hireswallclock/;

my $t = Benchmark->new;

use utf8;
use lib 'lib', '../lib';

use File::Basename 'dirname';
use File::Spec::Functions 'catdir';

use_ok('KorAP::XML::Krill');

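# This script checks the Krill conversion of the Goethe sample text GOE/AGA/03828:
# metadata parsing via KorAP::XML::Krill, base tokenization, and the stepwise
# addition of annotation foundries (Base, OpenNLP, TreeTagger, CoreNLP, Glemm,
# Connexor, Mate, XIP) to the generated JSON document.
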
# GOE/AGA/03828
my $path = catdir(dirname(__FILE__), '../corpus/GOE/AGA/03828');
# my $path = '/home/ndiewald/Repositories/korap/KorAP-sandbox/KorAP-lucene-indexer/t/GOE/AGA/03828';

ok(my $doc = KorAP::XML::Krill->new( path => $path . '/' ), 'Load KorAP::XML::Krill document');
ok($doc->parse, 'Parse document');

is($doc->text_sigle, 'GOE_AGA.03828', 'Correct text sigle');
is($doc->doc_sigle, 'GOE_AGA', 'Correct document sigle');
is($doc->corpus_sigle, 'GOE', 'Correct corpus sigle');

is($doc->title, 'Autobiographische Einzelheiten', 'Title');
is($doc->pub_place, 'München', 'PubPlace');
is($doc->pub_date, '19820000', 'Publication Date');
ok(!$doc->sub_title, 'SubTitle');
is($doc->author, 'Goethe, Johann Wolfgang von', 'Author');

is($doc->publisher, 'Verlag C. H. Beck', 'Publisher');
ok(!$doc->editor, 'Editor');
is($doc->text_type, 'Autobiographie', 'Correct Text Type');
ok(!$doc->text_type_art, 'Correct Text Type Art');
ok(!$doc->text_type_ref, 'Correct Text Type Ref');
ok(!$doc->text_column, 'Correct Text Column');
ok(!$doc->text_domain, 'Correct Text Domain');
is($doc->creation_date, '18200000', 'Creation Date');
is($doc->license, 'QAO-NC', 'License');
is($doc->pages, '529-547', 'Pages');
ok(!$doc->file_edition_statement, 'File Ed Statement');
ok(!$doc->bibl_edition_statement, 'Bibl Ed Statement');
is($doc->reference . "\n", <<'REF', 'Reference');
Goethe, Johann Wolfgang von: Autobiographische Einzelheiten, (Geschrieben bis 1832), In: Goethe, Johann Wolfgang von: Goethes Werke, Bd. 10, Autobiographische Schriften II, Hrsg.: Trunz, Erich. München: Verlag C. H. Beck, 1982, S. 529-547
REF
is($doc->language, 'de', 'Language');

is($doc->corpus_title, 'Goethes Werke', 'Correct Corpus title');
ok(!$doc->corpus_sub_title, 'Correct Corpus Sub title');
is($doc->corpus_author, 'Goethe, Johann Wolfgang von', 'Correct Corpus author');
is($doc->corpus_editor, 'Trunz, Erich', 'Correct Corpus editor');

is($doc->doc_title, 'Goethe: Autobiographische Schriften II, (1817-1825, 1832)',
   'Correct Doc title');
ok(!$doc->doc_sub_title, 'Correct Doc Sub title');
ok(!$doc->doc_author, 'Correct Doc author');
ok(!$doc->doc_editor, 'Correct Doc editor');

# Tokenization
use_ok('KorAP::XML::Tokenizer');

my ($token_base_foundry, $token_base_layer) = (qw/OpenNLP Tokens/);
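# The OpenNLP token layer serves as the base tokenization (the tokenSource),
# onto which all annotation foundries below are presumably mapped.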

# Get tokenization
my $tokens = KorAP::XML::Tokenizer->new(
  path => $doc->path,
  doc => $doc,
  foundry => $token_base_foundry,
  layer => $token_base_layer,
  name => 'tokens'
);
ok($tokens, 'Token Object is fine');
ok($tokens->parse, 'Token parsing is fine');

my $output = decode_json( $tokens->to_json );

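# The JSON output contains the primary text, the token stream, and the
# flattened metadata fields; the checks below mirror the metadata tests above.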
is(substr($output->{data}->{text}, 0, 100), 'Autobiographische einzelheiten Selbstschilderung (1) immer tätiger, nach innen und außen fortwirkend', 'Primary Data');
is($output->{data}->{name}, 'tokens', 'tokenName');
is($output->{data}->{tokenSource}, 'opennlp#tokens', 'tokenSource');
is($output->{version}, '0.03', 'version');
is($output->{data}->{foundries}, '', 'Foundries');
is($output->{data}->{layerInfos}, '', 'layerInfos');
is($output->{data}->{stream}->[0]->[3], 's:Autobiographische', 'data');

is($output->{textSigle}, 'GOE_AGA.03828', 'Correct text sigle');
is($output->{docSigle}, 'GOE_AGA', 'Correct document sigle');
is($output->{corpusSigle}, 'GOE', 'Correct corpus sigle');

is($output->{author}, 'Goethe, Johann Wolfgang von', 'Author');
is($output->{pubPlace}, 'München', 'PubPlace');
is($output->{pubDate}, '19820000', 'Publication Date');
is($output->{title}, 'Autobiographische Einzelheiten', 'Title');
ok(!exists $output->{subTitle}, 'subTitle');

is($output->{publisher}, 'Verlag C. H. Beck', 'Publisher');
ok(!exists $output->{editor}, 'Editor');
is($output->{textType}, 'Autobiographie', 'Correct Text Type');
ok(!exists $output->{textTypeArt}, 'Correct Text Type Art');
ok(!exists $output->{textTypeRef}, 'Correct Text Type Ref');
ok(!exists $output->{textColumn}, 'Correct Text Column');
ok(!exists $output->{textDomain}, 'Correct Text Domain');
is($output->{creationDate}, '18200000', 'Creation Date');
is($output->{license}, 'QAO-NC', 'License');
is($output->{pages}, '529-547', 'Pages');
ok(!exists $output->{fileEditionStatement}, 'File Ed Statement');
ok(!exists $output->{biblEditionStatement}, 'Bibl Ed Statement');
is($output->{reference} . "\n", <<'REF', 'Reference');
Goethe, Johann Wolfgang von: Autobiographische Einzelheiten, (Geschrieben bis 1832), In: Goethe, Johann Wolfgang von: Goethes Werke, Bd. 10, Autobiographische Schriften II, Hrsg.: Trunz, Erich. München: Verlag C. H. Beck, 1982, S. 529-547
REF
is($output->{language}, 'de', 'Language');

is($output->{corpusTitle}, 'Goethes Werke', 'Correct Corpus title');
ok(!exists $output->{corpusSubTitle}, 'Correct Corpus Sub title');
is($output->{corpusAuthor}, 'Goethe, Johann Wolfgang von', 'Correct Corpus author');
is($output->{corpusEditor}, 'Trunz, Erich', 'Correct Corpus editor');

is($output->{docTitle}, 'Goethe: Autobiographische Schriften II, (1817-1825, 1832)', 'Correct Doc title');
ok(!exists $output->{docSubTitle}, 'Correct Doc Sub title');
ok(!exists $output->{docAuthor}, 'Correct Doc author');
ok(!exists $output->{docEditor}, 'Correct Doc editor');

## Base
$tokens->add('Base', 'Sentences');
$tokens->add('Base', 'Paragraphs');

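# Each $tokens->add(Foundry, Layer) call below merges a further annotation
# layer into the token stream, extending the 'foundries' and 'layerInfos'
# strings. In the stream terms, 's:' marks surface forms and '<>:' marks span
# annotations; the '<i>'/'<b>' parts appear to carry offset and payload values.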
$output = decode_json( $tokens->to_json );

is($output->{data}->{foundries}, 'base base/paragraphs base/sentences', 'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans', 'layerInfos');
my $first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr/s:Autobiographische/, 'data');
like($first_token, qr/_0\$<i>0<i>17/, 'data');
like($first_token, qr!<>:base/s:s\$<b>64<i>0<i>30<i>2<b>2!, 'data');
like($first_token, qr!<>:base\/s:t\$<b>64<i>0<i>35199<i>5226<b>0!, 'data');

## OpenNLP
$tokens->add('OpenNLP', 'Sentences');

$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/s=spans', 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:opennlp/s:s\$<b>64<i>0<i>254<i>32!, 'data');

$tokens->add('OpenNLP', 'Morpho');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans', 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!opennlp/p:ADJA!, 'data');

## Treetagger
$tokens->add('TreeTagger', 'Sentences');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans tt/s=spans', 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:tt/s:s\$<b>64<i>0<i>179<i>21<b>0!, 'data');

$tokens->add('TreeTagger', 'Morpho');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');

is($output->{data}->{layerInfos}, 'base/s=spans opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!tt/l:autobiographisch\$<b>129<b>165!, 'data');
like($first_token, qr!tt/p:ADJA\$<b>129<b>165!, 'data');
like($first_token, qr!tt/l:Autobiographische\$<b>129<b>89!, 'data');
like($first_token, qr!tt/p:NN\$<b>129<b>89!, 'data');

## CoreNLP
$tokens->add('CoreNLP', 'NamedEntities');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences corenlp corenlp/namedentities opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans corenlp/ne=tokens opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');

# diag "Missing test for NamedEntities";

# Problematic:
# diag Dumper $output->{data}->{stream}->[180];
# diag Dumper $output->{data}->{stream}->[341];

$tokens->add('CoreNLP', 'Sentences');
$output = decode_json( $tokens->to_json );
is($output->{data}->{foundries},
   'base base/paragraphs base/sentences corenlp corenlp/namedentities corenlp/sentences opennlp opennlp/morpho opennlp/sentences treetagger treetagger/morpho treetagger/sentences',
   'Foundries');
is($output->{data}->{layerInfos}, 'base/s=spans corenlp/ne=tokens corenlp/s=spans opennlp/p=tokens opennlp/s=spans tt/l=tokens tt/p=tokens tt/s=spans', 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:corenlp/s:s\$<b>64<i>0<i>254<i>32<b>0!, 'data');

$tokens->add('CoreNLP', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!corenlp/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!corenlp/p=tokens!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!corenlp/p:ADJA!, 'data');

$tokens->add('CoreNLP', 'Constituency');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!corenlp/constituency!, 'Foundries');
like($output->{data}->{layerInfos}, qr!corenlp/c=spans!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:corenlp/c:NP\$<b>64<i>0<i>17<i>1<b>6!, 'data');
like($first_token, qr!<>:corenlp/c:CNP\$<b>64<i>0<i>17<i>1<b>7!, 'data');
like($first_token, qr!<>:corenlp/c:NP\$<b>64<i>0<i>17<i>1<b>8!, 'data');
like($first_token, qr!<>:corenlp/c:AP\$<b>64<i>0<i>17<i>1<b>9!, 'data');
like($first_token, qr!<>:corenlp/c:PP\$<b>64<i>0<i>50<i>3<b>4!, 'data');
like($first_token, qr!<>:corenlp/c:S\$<b>64<i>0<i>50<i>3<b>5!, 'data');
like($first_token, qr!<>:corenlp/c:PP\$<b>64<i>0<i>58<i>5<b>2!, 'data');
like($first_token, qr!<>:corenlp/c:S\$<b>64<i>0<i>58<i>5<b>3!, 'data');
like($first_token, qr!<>:corenlp/c:ROOT\$<b>64<i>0<i>254<i>32<b>0!, 'data');
like($first_token, qr!<>:corenlp/c:S\$<b>64<i>0<i>254<i>32<b>1!, 'data');

## Glemm
$tokens->add('Glemm', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!glemm/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!glemm/l=tokens!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!glemm/l:__autobiographisch!, 'data');
like($first_token, qr!glemm/l:\+_Auto!, 'data');
like($first_token, qr!glemm/l:\+_biographisch!, 'data');
like($first_token, qr!glemm/l:\+\+Biograph!, 'data');
like($first_token, qr!glemm/l:\+\+-isch!, 'data');

## Connexor
$tokens->add('Connexor', 'Sentences');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/sentences!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/s=spans!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:cnx/s:s\$<b>64<i>0<i>179<i>21<b>0!, 'data');

$tokens->add('Connexor', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/p=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!cnx/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!cnx/m=tokens!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!cnx/l:autobiografisch!, 'data');
like($first_token, qr!cnx/p:A!, 'data');

$tokens->add('Connexor', 'Phrase');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/phrase!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/c=spans!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:cnx/c:np\$<b>64<i>0<i>30<i>2!, 'data');

$tokens->add('Connexor', 'Syntax');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!connexor/syntax!, 'Foundries');
like($output->{data}->{layerInfos}, qr!cnx/syn=tokens!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!cnx/syn:\@PREMOD!, 'data');

## Mate
$tokens->add('Mate', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!mate/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!mate/p=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!mate/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!mate/m=tokens!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!mate/l:autobiographisch!, 'data');
like($first_token, qr!mate/p:NN!, 'data');
like($first_token, qr!mate/m:case:nom!, 'data');
like($first_token, qr!mate/m:number:pl!, 'data');
like($first_token, qr!mate/m:gender:\*!, 'data');

# diag "No test for mate dependency";

## XIP
$tokens->add('XIP', 'Sentences');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/sentences!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/s=spans!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:xip/s:s\$<b>64<i>0<i>179<i>21!, 'data');

$tokens->add('XIP', 'Morpho');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/morpho!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/l=tokens!, 'layerInfos');
like($output->{data}->{layerInfos}, qr!xip/p=tokens!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:xip/s:s\$<b>64<i>0<i>179<i>21!, 'data');

$tokens->add('XIP', 'Constituency');
$output = decode_json( $tokens->to_json );
like($output->{data}->{foundries}, qr!xip/constituency!, 'Foundries');
like($output->{data}->{layerInfos}, qr!xip/c=spans!, 'layerInfos');
$first_token = join('||', @{$output->{data}->{stream}->[0]});
like($first_token, qr!<>:xip/c:NP\$<b>64<i>0<i>17<i>1<b>1!, 'data');
like($first_token, qr!<>:xip/c:AP\$<b>64<i>0<i>17<i>1<b>2!, 'data');
like($first_token, qr!<>:xip/c:ADJ\$<b>64<i>0<i>17<i>1<b>3!, 'data');
like($first_token, qr!<>:xip/c:TOP\$<b>64<i>0<i>179<i>21<b>0!, 'data');

# diag "No test for xip dependency";

# print timestr(timediff(Benchmark->new, $t));

done_testing;
__END__