| #!/usr/bin/env perl |
| use strict; |
| use warnings; |
| |
| use Log::Any '$log'; |
| use Log::Any::Adapter; |
| use Pod::Usage; |
| use Getopt::Long qw(GetOptions :config no_auto_abbrev); |
| |
| use File::Basename qw(dirname); |
| |
| use open qw(:std :utf8); # assume utf-8 encoding |
| use Encode qw(encode decode); |
| |
| use XML::CompactTree::XS; |
| use XML::LibXML::Reader; |
| |
| use FindBin; |
| BEGIN { |
| unshift @INC, "$FindBin::Bin/../lib"; |
| }; |
| |
| use KorAP::XML::TEI qw!remove_xml_comments escape_xml!; |
| use KorAP::XML::TEI::Tokenizer::External; |
| use KorAP::XML::TEI::Tokenizer::Conservative; |
| use KorAP::XML::TEI::Tokenizer::Aggressive; |
| use KorAP::XML::TEI::Annotations::Collector; |
| use KorAP::XML::TEI::Zipper; |
| use KorAP::XML::TEI::Header; |
| |
| |
our $VERSION = '0.01';

# Banner printed by both --help and --version.
our $VERSION_MSG = "\ntei2korapxml - v$VERSION\n";


# Parse options from the command line
GetOptions(
"root|r=s" => \(my $_root_dir = '.'), # name of root directory inside zip file
"input|i=s" => \(my $input_fname = ''), # input file (yet only TEI I5 Format accepted)
'tokenizer-call|tc=s' => \(my $tokenizer_call), # Temporary argument for testing purposes
'use-intern-tokenization|ti' => \(my $tokenizer_intern), # use intern tokenization (default = no)
'log|l=s' => \(my $log_level = 'notice'), # log level for Log::Any (default: notice)
'help|h' => sub {
# Print the relevant POD sections to STDOUT and exit
pod2usage(
-verbose => 99,
-sections => 'NAME|DESCRIPTION|SYNOPSIS|ARGUMENTS|OPTIONS',
-msg => $VERSION_MSG,
-output => '-'
)
},
'version|v' => sub {
# Print only the version banner to STDOUT and exit
pod2usage(
-verbose => 0,
-msg => $VERSION_MSG,
-output => '-'
)
}
);

# Route all log output to STDERR at the requested level
Log::Any::Adapter->set('Stderr', log_level => $log_level);
| |
| # |
| # ~~~ parameter (mandatory) ~~~ |
| # |
| my $_TEXT_BODY = "text"; # tag (without attributes), which contains the primary text |
| # optional |
| my $_CORP_HEADER_BEG = "idsHeader type=\"corpus\""; # just keep the correct order of the attributes and evtl. add an '.*' between them |
| # optional |
| my $_DOC_HEADER_BEG = "idsHeader type=\"document\""; # analog |
| # mandatory |
| my $_TEXT_HEADER_BEG = "idsHeader type=\"text\""; # analog |
| |
| # |
| # ~~~ constants ~~~ |
| # |
| |
| ## extern tokenization |
| my $_GEN_TOK_EXT = $tokenizer_call ? 1 : 0; |
| # TODO: |
| # Read tokenizer call from configuration file. |
| # was 'java -cp '. join(":", ".", glob(&dirname(__FILE__)."/../target/*.jar")). " de.ids_mannheim.korap.tokenizer.KorAPTokenizerImpl"; |
| my $ext_tok; |
| if ($tokenizer_call) { |
| $ext_tok = KorAP::XML::TEI::Tokenizer::External->new($tokenizer_call); |
| }; |
| my $_tok_file_ext = "tokens.xml"; |
| ## |
| |
| ## intern tokenization |
| my $_GEN_TOK_INT = $tokenizer_intern; # simple tokenization (recommended for testing) |
| my $_tok_file_con = "tokens_conservative.xml"; |
| my $_tok_file_agg = "tokens_aggressive.xml"; |
| my $aggr_tok = KorAP::XML::TEI::Tokenizer::Aggressive->new; |
| my $cons_tok = KorAP::XML::TEI::Tokenizer::Conservative->new; |
| ## |
| |
| my $_tok_dir = "base"; # name of directory for storing tokenization files |
| |
| my $_DEBUG = 0; # set to 1 for minimal more debug output (no need to be parametrized) |
| my $_XCT_LN = 0; # only for debugging: include line numbers in elements of $tree_data |
| # (see also manpage of XML::CompactTree::XS) |
| |
| my $_header_file = "header.xml"; # name of files containing the text, document and corpus header |
| my $_data_file = "data.xml"; # name of file containing the primary text data (tokens) |
| my $_structure_dir = "struct"; # name of directory containing the $_structure_file |
| my $_structure_file = "structure.xml"; # name of file containing all tags (except ${_TOKEN_TAG}'s) related information |
| # (= their names and byte offsets in $_data) |
| ## TODO: optional (different annotation tools can produce more zip-files for feeding into KorAP-XML-Krill) |
| my $_TOKENS_PROC = 1; # on/off: processing of ${_TOKEN_TAG}'s (default: 1) |
| my $_tokens_dir = "tokens"; # name of directory containing the $_tokens_file |
| my $_tokens_file = "morpho.xml"; # name of file containing all ${_TOKEN_TAG}'s related information (=their byte offsets in $_data) |
| # - evtl. with additional inline annotations |
| my $_TOKENS_TAG = "w"; # name of tag containing all information stored in $_tokens_file |
| |
| ## TODO: optional |
| # handling inline annotations (inside $_TOKENS_TAG) |
| my $_INLINE_ANNOT = $ENV{KORAPXMLTEI_INLINE}?1:0; # on/off: set to 1 if inline annotations are present and should be processed (default: 0) |
| |
| # TODO: |
| # These parameters are now defunct and moved to Token.pm |
| my $_INLINE_LEM_RD = "lemma"; # from which attribute to read LEMMA information |
| my $_INLINE_ATT_RD = "ana"; # from which attribute to read POS information (and evtl. additional MSD - Morphosyntactic Descriptions) |
| # TODO: The format for the POS and MSD information has to suffice the regular expression ([^ ]+)( (.+))? |
| # - which means, that the POS information can be followed by an optional blank with additional |
| # MSD information; unlike the MSD part, the POS part may not contain any blanks. |
| my $_INLINE_POS_WR = "pos"; # name (inside $_tokens_file) referring to POS information |
| my $_INLINE_MSD_WR = "msd"; # name (inside $_tokens_file) referring to MSD information |
| my $_INLINE_LEM_WR = "lemma"; # name (inside $_tokens_file) referring to LEMMA information |
| ## |
| |
| |
| # |
| # ~~~ variables ~~~ |
| # |
| |
| # Initialize Token- and Structure-Collector |
| my $tokens = KorAP::XML::TEI::Annotations::Collector->new; |
| my $structures = KorAP::XML::TEI::Annotations::Collector->new; |
| |
| |
| # Initialize zipper |
| my $zipper = KorAP::XML::TEI::Zipper->new($_root_dir); |
| my $input_fh; # input file handle (default: stdin) |
| |
| my $data; # contains the primary text (created by func. 'retr_info' from $buf_in), which is written to '$data_file' |
| |
| my $dir; # text directory (below $_root_dir) |
| |
| my ( $text_id, $text_id_esc ); # '$text_id_esc' = escaped version of $text_id (see %ent) |
| |
| my ( $data_prfx1, $data_prfx2, $data_sfx ); # $data_* are written to $_data_file |
| |
| my ( $ref, $idx, $att_idx ); # needed in func. 'write_structures' |
| |
| my ( $reader, # instance of 'XML::LibXML::Reader->new' (on input '$buf_in') |
| $tree_data ); # instance of 'XML::CompactTree::XS::readSubtreeToPerl' (on input '$reader') |
| |
| # these are only used inside recursive function 'retr_info' |
| my ( $_IDX, # value is set dependent on $_XCT_LN - for extracting array of child elements from element in $tree_data |
| $e, # element from $tree_data |
| $dl, # actual length of string $data |
| # represents the actual processed element from @structures |
| ## variables for handling ~ whitespace related issue ~ (it is sometimes necessary, to correct the from-values for some tags) |
| $add_one, # ... |
| $fval, # ... |
| %ws); # hash for indices of whitespace-nodes (needed to recorrect from-values) |
| # idea: when closing element, check if it's from-index minus 1 refers to a whitespace-node |
| # (means: 'from-index - 1' is a key in %ws). |
| # if this is _not_ the case, then the from-value is one to high => correct it by substracting 1 |
| |
| my $c; # index variables used in loops |
| |
| |
| # |
| # ~~~ main ~~~ |
| # |
| |
# ~ initializations ~

# Child-node array index inside a $tree_data element: shifted by one
# when XCT_LINE_NUMBERS is enabled (debugging only).
$_IDX = $_XCT_LN ? 5 : 4;

$data_prfx1 = $data_prfx2 = $data_sfx = "";

$fval = 0;

# Normalize the header-matching patterns: allow arbitrary attribute
# data between the tag name and the expected attribute(s).
for my $pattern ($_CORP_HEADER_BEG, $_DOC_HEADER_BEG, $_TEXT_HEADER_BEG) {
$pattern =~ s!^([^\s]+)(.*)$!$1\[\^>\]*$2!;
};

# Skeleton of data.xml: prefix 1, the (escaped) text id, prefix 2,
# the primary text, and finally the suffix.
$data_prfx1 = join '',
qq{<?xml version="1.0" encoding="UTF-8"?>\n},
qq{<?xml-model href="text.rng" type="application/xml" schematypens="http://relaxng.org/ns/structure/1.0"?>\n\n},
qq{<raw_text docid="};
## TODO: can 'metadata.xml' change or is it constant?
$data_prfx2 .= qq{" xmlns="http://ids-mannheim.de/ns/KorAP">\n}
. qq{ <metadata file="metadata.xml" />\n}
. qq{ <text>};
$data_sfx = "</text>\n</raw_text>";


# ~ read input and write output (text by text) ~
main();
| |
| |
| # |
| # ~~~ subs ~~~ |
| # |
| |
| |
sub main {

# Read the input document (STDIN or --input) line by line. For every
# text body found, parse it with XML::LibXML::Reader/XML::CompactTree,
# collect structure/token annotations via retr_info(), tokenize, and
# write the resulting KorAP-XML streams through the shared $zipper.
# Headers are parsed and written as they are encountered.

my ( $pfx, $sfx );

my $tl = 0; # text line (needed for whitespace handling)

$input_fh = *STDIN; # input file handle (default: stdin)

$data = $dir = "";


if ( $input_fname ne '' ){

open ( $input_fh, "<", "$input_fname") || die "File \'$input_fname\' could not be opened.\n";

}


# prevents segfaulting of 'XML::LibXML::Reader' inside 'main()' - see notes on 'PerlIO layers' in 'man XML::LibXML')
# removing 'use open qw(:std :utf8)' would fix this problem too, but using binmode on input is more granular
# see in perluniintro: You can switch encodings on an already opened stream by using "binmode()
# see in perlfunc: If LAYER is omitted or specified as ":raw" the filehandle is made suitable for passing binary data.
binmode $input_fh;

my $pos;
my $l = length('</' . $_TEXT_BODY) + 1; # length of the closing text-body tag (e.g. '</text>')

# ~ loop (reading input document) ~

MAIN: while ( <$input_fh> ){

$_ = remove_xml_comments( $input_fh, $_ ); # remove HTML (multi-line) comments (<!--...-->)


if ( index($_, $_TEXT_BODY) >= 0 && m#^(.*)<${_TEXT_BODY}(?: [^>]*)?>(.*)$# ){

# ~ start of text body ~

$pfx = $1; $sfx = $2;

# The opening text-body tag must stand alone on its line
die "ERROR ($0): main(): input line number $.: line with opening text-body tag '${_TEXT_BODY}'"
." contains additional information ... => Aborting\n\tline=$_"
if $pfx !~ /^\s*$/ || $sfx !~ /^\s*$/;

# text body data extracted from input document ($input_fh), further processed by XML::LibXML::Reader
my $buf_in = '';

# Iterate over all lines in the text body
while (<$input_fh>) {

$_ = remove_xml_comments( $input_fh, $_ );

# ~ end of text body ~
if (($pos = index($_, '</' . $_TEXT_BODY)) >= 0) {

# write data.xml, structure.xml and possibly morpho.xml and/or tokenization files (s.a.: $_tok_file_ext, $_tok_file_con, $_tok_file_agg)

# The closing text-body tag must also stand alone on its line
die "ERROR ($0): main(): input line number $.: line with closing text-body tag '${_TEXT_BODY}'"
." contains additional information ... => Aborting\n\tline=$_"
if (substr($_, 0, $pos) . substr($_, $l + $pos)) !~ /^\s*$/;

if ( $dir ne "" ){

$reader = XML::LibXML::Reader->new( string => "<text>$buf_in</text>", huge => 1 );

# ~ whitespace handling ~
#
# Every whitespace inside the processed text is 'significant' and recognized as a node of type 'XML_READER_TYPE_SIGNIFICANT_WHITESPACE'
# (see function 'retr_info()').
#
# Definition of significant and insignificant whitespace
# (source: https://www.oracle.com/technical-resources/articles/wang-whitespace.html):
#
# Significant whitespace is part of the document content and should be preserved.
# Insignificant whitespace is used when editing XML documents for readability.
# These whitespaces are typically not intended for inclusion in the delivery of the document.
#
if ( $_XCT_LN ){ # _XCT_LINE_NUMBERS is only for debugging
$tree_data = XML::CompactTree::XS::readSubtreeToPerl( $reader, XCT_DOCUMENT_ROOT | XCT_IGNORE_COMMENTS | XCT_ATTRIBUTE_ARRAY | XCT_LINE_NUMBERS );
} else {
$tree_data = XML::CompactTree::XS::readSubtreeToPerl( $reader, XCT_DOCUMENT_ROOT | XCT_IGNORE_COMMENTS | XCT_ATTRIBUTE_ARRAY );
}

# Reset the per-text collectors before parsing
$structures->reset;

if ( $_TOKENS_PROC ){
$tokens->reset;
}

$dl = 0;

# ~ whitespace related issue ~
$add_one = 0;
%ws = ();


# ~ recursion ~
retr_info(1, \$tree_data->[2] ); # parse input data


# ~ write data.xml ~

# TODO: should not be necessary, because whitespace at the end of every input line is removed: see 'whitespace handling' inside text body
$data =~ tr/\n\r/  /; # note: 2 blanks - otherwise offset data would become corrupt
#


# ~ tokenization ~

if ( $_GEN_TOK_EXT ){

# Tokenize and output
$ext_tok->tokenize($data)->to_zip(
$zipper->new_stream("$dir/$_tok_dir/$_tok_file_ext"),
$text_id_esc
);
};

if ( $_GEN_TOK_INT ){

# Tokenize and output
$cons_tok->tokenize($data)->to_zip(
$zipper->new_stream("$dir/$_tok_dir/$_tok_file_con"),
$text_id_esc
);

$aggr_tok->tokenize($data)->to_zip(
$zipper->new_stream("$dir/$_tok_dir/$_tok_file_agg"),
$text_id_esc
);

$aggr_tok->reset;
$cons_tok->reset;
};

# Encode and escape data
$data = escape_xml(encode( "UTF-8", $data ));
# note: the index still refers to the 'single character'-versions,
# which are counted as 1 (search for '&amp;' in data.xml and see
# corresponding indices in $_tokens_file)

if ($_DEBUG) {
$log->debug("Writing (utf8-formatted) xml file $dir/$_data_file");
};

$zipper->new_stream("$dir/$_data_file")
->print("$data_prfx1$text_id_esc$data_prfx2$data$data_sfx");

# ~ write structures ~
if (!$structures->empty) {
$structures->to_zip(
$zipper->new_stream("$dir/$_structure_dir/$_structure_file"),
$text_id_esc,
2 # = structure serialization
);
};

# ~ write tokens ~
if ($_TOKENS_PROC && !$tokens->empty) {
$tokens->to_zip(
$zipper->new_stream("$dir/$_tokens_dir/${_tokens_file}"),
$text_id_esc,
$_INLINE_ANNOT # Either 0 = tokens without inline or 1 = tokens with inline
);
};


$data = $dir = ""; # reinit.

} else { # $dir eq ""

$log->warn("Maybe empty textSigle => skipping this text ...\ndata=$data");
}

next MAIN;
};

# ~ inside text body ~

# ~ whitespace handling ~

# The idea for the below code fragment was to fix (recreate) missing whitespace in a poorly created corpus, in which linebreaks were inserted
# into the text with the addition that maybe (or not) whitespace before those linebreaks was unintentionally stripped.
#
# It soon turned out, that it was best to suggest considering just avoiding linebreaks and putting all primary text tokens into one line (see
# example further down and notes on 'Input restrictions' in the manpage).
#
# Somehow an old first very poor approach remained, which is not stringent, but also doesn't affect one-line text.
#
# TODO: Maybe it's best, to keep the stripping of whitespace and to just remove the if-clause and to insert a blank by default (with possibly
# an option on how newlines in primary text should be handled (stripped or replaced by a whitespace)).
#
# Examples (how primary text with linebreaks would be converted by below code):
#
# '...<w>end</w>\n<w>.</w>...' -> '...<w>end</w> <w>.</w>...'
# '...<w>,</w>\n<w>this</w>\n<w>is</w>\n<w>it</w>\n<w>!</w>...' -> '<w>,<w> <w>this</w> <w>is</w> <w>it</w> <w>!</w>'.

s/^\s+//; s/\s+$//; # remove consecutive whitespace at beginning and end (mostly one newline)

### NOTE: this is only relevant, if a text consists of more than one line
### TODO: find a better solution, or create a warning, if a text has more than one line ($tl > 1)
### do testing with 2 different corpora (one with only one-line texts, the other with several lines per text)
if ( m/<[^>]+>[^<]/ ){ # line contains at least one tag with at least one character contents

# NOTE: not stringent ('...' stands for text):
#
# beg1............................end1 => no blank before 'beg1'
# beg2....<pb/>...................end2 => no blank before 'beg2'
# beg3....<info attr1="val1"/>....end3 => no blank before 'beg3'
# beg4....<test>ok</test>.........end4 => blank before 'beg4'
#
# => beg1....end1beg2...<pb/>...end2beg3....<info attr1="val1"/>....end3 beg4...<test>ok</test>....end4
# ^
# |_blank between 'end3' and 'beg4'

$tl++; # counter for text lines

s/^(.)/ $1/ if $tl > 1; # insert blank before 1st character (for 2nd line and consecutive lines)
}
###

# add line to buffer
$buf_in .= $_;
};

} elsif ( m#^(.*)(<(?:${_TEXT_HEADER_BEG}|${_DOC_HEADER_BEG}|${_CORP_HEADER_BEG}).*)$# ){

# ~ start of header ~
$pfx = $1;
my $content = "$2\n";

die "ERROR ($0): main(): input line number $.: line with opening header tag"
." is not in expected format ... => Aborting\n\tline=$_"
if $pfx !~ /^\s*$/;

# Parse header
my $header = KorAP::XML::TEI::Header->new($content)->parse($input_fh);

# Header was parseable
if ($header) {

# Write header to zip
my $file = $header->dir . '/' . $_header_file;

$log->debug("Writing file $file") if $_DEBUG;

$header->to_zip($zipper->new_stream($file));

# Header is for text level
if ($header->type eq 'text') {

# Remember dir and sigles
$dir = $header->dir;
$text_id = $header->id;
$text_id_esc = $header->id_esc;

# log output for seeing progression
$log->notice("$0: main(): text_id=".decode('UTF-8', $text_id ));

$tl = 0; # reset (needed for ~ whitespace handling ~)
};
}
}
} #end: while

# Flush and close the output zip and the external tokenizer (if any)
$zipper->close;

$ext_tok->close if $_GEN_TOK_EXT;

} # end: sub main
| |
| |
sub retr_info { # called from main()
# Recursively walk the XML::CompactTree representation of one text body:
# append text-/whitespace-node contents to $data (tracking its length in
# $dl) and record, for every element node, its name, attributes, from-/
# to-offsets and nesting level in the $structures (and, for $_TOKENS_TAG,
# also in the $tokens) collector.
# Args: $rl = recursion level, $_[0] = reference to an array reference
# of sibling nodes.
# recursion level
# (1 = topmost level inside retr_info() = should always be level of tag $_TEXT_BODY)
my $rl = shift;

# Notes on how 'XML::CompactTree::XS' works
#
# Example: <node a="v"><node1>some <n/> text</node1><node2>more-text</node2></node>
#
# Print out name of 'node2' for the above example:
#
# echo '<node a="v"><node1>some <n/> text</node1><node2>more-text</node2></node>' | perl -e 'use XML::CompactTree::XS; use XML::LibXML::Reader; $reader = XML::LibXML::Reader->new(IO => STDIN); $data = XML::CompactTree::XS::readSubtreeToPerl( $reader, XCT_DOCUMENT_ROOT | XCT_IGNORE_COMMENTS | XCT_LINE_NUMBERS ); print "\x27".$data->[2]->[0]->[5]->[1]->[1]."\x27\n"'
#
# Exploring the structure of $data ( = reference to below array ):
#
# [ 0: XML_READER_TYPE_DOCUMENT,
# 1: ?
# 2: [ 0: [ 0: XML_READER_TYPE_ELEMENT <- start recursion with array '$data->[2]' (see main(): retr_info( \$tree_data->[2] ))
# 1: 'node'
# 2: ?
# 3: HASH (attributes)
# 4: 1 (line number)
# 5: [ 0: [ 0: XML_READER_TYPE_ELEMENT
# 1: 'node1'
# 2: ?
# 3: undefined (no attributes)
# 4: 1 (line number)
# 5: [ 0: [ 0: XML_READER_TYPE_TEXT
# 1: 'some '
# ]
# 1: [ 0: XML_READER_TYPE_ELEMENT
# 1: 'n'
# 2: ?
# 3: undefined (no attributes)
# 4: 1 (line number)
# 5: undefined (no child-nodes)
# ]
# 2: [ 0: XML_READER_TYPE_TEXT
# 1: ' text'
# ]
# ]
# ]
# 1: [ 0: XML_READER_TYPE_ELEMENT
# 1: 'node2'
# 2: ?
# 3: undefined (no attributes)
# 4: 1 (line number)
# 5: [ 0: [ 0: XML_READER_TYPE_TEXT
# 1: 'more-text'
# ]
# ]
# ]
# ]
# ]
# ]
# ]
#
# $data->[0] = 9 (=> type == XML_READER_TYPE_DOCUMENT)
#
# ref($data->[2]) == ARRAY (with 1 element for 'node')
# ref($data->[2]->[0]) == ARRAY (with 6 elements)
#
# $data->[2]->[0]->[0] == 1 (=> type == XML_READER_TYPE_ELEMENT)
# $data->[2]->[0]->[1] == 'node'
# ref($data->[2]->[0]->[3]) == HASH (=> ${$data->[2]->[0]->[3]}{a} == 'v')
# $data->[2]->[0]->[4] == 1 (line number)
# ref($data->[2]->[0]->[5]) == ARRAY (with 2 elements for 'node1' and 'node2')
# # child-nodes of actual node (see $_IDX)
#
# ref($data->[2]->[0]->[5]->[0]) == ARRAY (with 6 elements)
# $data->[2]->[0]->[5]->[0]->[0] == 1 (=> type == XML_READER_TYPE_ELEMENT)
# $data->[2]->[0]->[5]->[0]->[1] == 'node1'
# $data->[2]->[0]->[5]->[0]->[3] == undefined (=> no attribute)
# $data->[2]->[0]->[5]->[0]->[4] == 1 (line number)
# ref($data->[2]->[0]->[5]->[0]->[5]) == ARRAY (with 3 elements for 'some ', '<n/>' and ' text')
#
# ref($data->[2]->[0]->[5]->[0]->[5]->[0]) == ARRAY (with 2 elements)
# $data->[2]->[0]->[5]->[0]->[5]->[0]->[0] == 3 (=> type == XML_READER_TYPE_TEXT)
# $data->[2]->[0]->[5]->[0]->[5]->[0]->[1] == 'some '
#
# ref($data->[2]->[0]->[5]->[0]->[5]->[1]) == ARRAY (with 5 elements)
# $data->[2]->[0]->[5]->[0]->[5]->[1]->[0] == 1 (=> type == XML_READER_TYPE_ELEMENT)
# $data->[2]->[0]->[5]->[0]->[5]->[1]->[1] == 'n'
# $data->[2]->[0]->[5]->[0]->[5]->[1]->[3] == undefined (=> no attribute)
# $data->[2]->[0]->[5]->[0]->[5]->[1]->[4] == 1 (line number)
# $data->[2]->[0]->[5]->[0]->[5]->[1]->[5] == undefined (=> no child-nodes)
#
# ref($data->[2]->[0]->[5]->[0]->[5]->[2]) == ARRAY (with 2 elements)
# $data->[2]->[0]->[5]->[0]->[5]->[2]->[0] == 3 (=> type == XML_READER_TYPE_TEXT)
# $data->[2]->[0]->[5]->[0]->[5]->[2]->[1] == ' text'
#
#
# retr_info() starts with the array reference ${$_[0]} (= \$tree_data->[2]), which corresponds to ${\$data->[2]} in the above example.
# Hence, the expression @{${$_[0]}} corresponds to @{${\$data->[2]}}, $e to ${${\$data->[2]}}[0] (= $data->[2]->[0]) and $e->[0] to
# ${${\$data->[2]}}[0]->[0] (= $data->[2]->[0]->[0]).


foreach $e ( @{${$_[0]}} ){ # iteration through all array elements ($_[0] is a reference to an array reference)


if ( $e->[0] == XML_READER_TYPE_ELEMENT ){ # element-node (see 'NODE TYPES' in manpage of XML::LibXML::Reader)


#~~~~
# from here: tag-node (opening)
#~~~~


# ~ handle structures ~

# $e->[1] represents the tag name
my $anno = $structures->add_new_annotation($e->[1]);

# ~ handle tokens ~

# Add element also to token list
if ($_TOKENS_PROC && $e->[1] eq $_TOKENS_TAG) {
$tokens->add_annotation($anno);
};

# ~ handle attributes ~

if ( defined $e->[3] ){ # only if attributes exist

for ( $c = 0; $c < @{$e->[3]}; $c += 2 ){ # with 'XCT_ATTRIBUTE_ARRAY', $node->[3] is an array reference of the form
# [ name1, value1, name2, value2, ....] of attribute names and corresponding values.
# note: arrays are faster (see: http://makepp.sourceforge.net/2.0/perl_performance.html)

# '$c' references the 'key' and '$c+1' the 'value'
$anno->add_attribute(
@{$e->[3]}[$c, $c + 1]
);
}
}


# ~ index 'from' ~

# this is, where a normal tag or tokens-tag ($_TOKENS_TAG) starts
$anno->set_from($dl + $add_one);

#~~~~
# until here: tag-node (opening)
#~~~~


# ~~ RECURSION ~~

if ( defined $e->[$_IDX] ){ # do no recursion, if $e->[$_IDX] is not defined (because we have no array of child-nodes, e.g.: <back/>)

retr_info($rl+1, \$e->[$_IDX]); # recursion with array of child-nodes
}


#~~~~~
# from here: tag-node (closing)
#~~~~~


# ~ handle structures and tokens ~

{
$fval = $anno->from;

if ( $fval > 0 && not exists $ws{ $fval - 1 } ){ # ~ whitespace related issue ~

# ~ previous node was a text-node ~

$anno->set_from($fval - 1);
}

# in case this fails, check input
die "ERROR ($0, retr_info()): text_id='$text_id', processing of \@structures: from-value ($fval) is 2 or more greater"
." than to-value ($dl) => please check. aborting ...\n"
if ( $fval - 1 ) > $dl;

# TODO: find example for which this case applies
# maybe this is not necessary anymore, because the above recorrection of the from-value suffices
# TODO: check, if it's better to remove this line and change above check to 'if ( $fval - 1) >= $dl;
# do testing with bigger corpus excerpt (wikipedia?)
$anno->set_from($dl) if $fval == $dl + 1;
$anno->set_to($dl);
$anno->set_level($rl);

# note: use $dl, because the offsets are _between_ the characters (e.g.: word = 'Hello' => from = 0 (before 'H'), to = 5 (after 'o'))
}

# ~ whitespace related issue ~
# clean up
delete $ws{ $fval - 1 } if $fval > 0 && exists $ws{ $fval - 1 };


#~~~~
# until here: tag-node (closing)
#~~~~


#~~~~~
# from here: text- and whitespace-nodes
#~~~~~

# The 3rd form of nodes, besides text- (XML_READER_TYPE_TEXT) and tag-nodes (XML_READER_TYPE_ELEMENT) are nodes of the type
# 'XML_READER_TYPE_SIGNIFICANT_WHITESPACE'.
#
# When modifiying the previous example (see: Notes on how 'XML::CompactTree::XS' works) by inserting an additional blank between
# '</node1>' and '<node2>', the output for '$data->[2]->[0]->[5]->[1]->[1]' is a blank (' ') and it's type is '14'
# (XML_READER_TYPE_SIGNIFICANT_WHITESPACE, see 'man XML::LibXML::Reader'):
#
# echo '<node a="v"><node1>some <n/> text</node1> <node2>more-text</node2></node>' | perl -e 'use XML::CompactTree::XS; use XML::LibXML::Reader; $reader = XML::LibXML::Reader->new(IO => STDIN); $data = XML::CompactTree::XS::readSubtreeToPerl( $reader, XCT_DOCUMENT_ROOT | XCT_IGNORE_COMMENTS | XCT_LINE_NUMBERS ); print "node=\x27".$data->[2]->[0]->[5]->[1]->[1]."\x27, type=".$data->[2]->[0]->[5]->[1]->[0]."\n"'

} elsif ( $e->[0] == XML_READER_TYPE_TEXT || $e->[0] == XML_READER_TYPE_SIGNIFICANT_WHITESPACE ){

# Notes on ~ whitespace related issue ~ (referred to the code fragment below)
#
# Example: '... <head type="main"><s>Campagne in Frankreich</s></head><head type="sub"> <s>1792</s> ...'
#
# Two text-nodes should normally be separated by a blank. In the above example, that would be the 2 text-nodes
# 'Campagne in Frankreich' and '1792', which are separated by the whitespace-node ' ' (see [2]).
#
# The text-node 'Campagne in Frankreich' leads to the setting of '$add_one' to 1, so that when opening the 2nd 'head'-tag,
# it's from-index gets set to the correct start-index of '1792' (and not to the start-index of the whitespace-node ' ').
#
# The assumption here is, that in most cases there _is_ a whitespace node between 2 text-nodes. The below code fragment
# enables a way, to check, if this really _was_ the case for the last 2 'non-tag'-nodes, when closing a tag:
#
# When a whitespace-node is read, its from-index is stored as a hash-key (in %ws), to state that it belongs to a ws-node.
# So when closing a tag, it can be checked, if the previous 'non-tag'-node (text or whitespace), which is the one before
# the last read 'non-tag'-node, was actually _not_ a ws-node, but instead a text-node. In that case, the from-value of
# the last read 'non-tag'-node has to be corrected (see [1]),
#
# For whitespace-nodes $add_one is set to 0, so when opening the next tag (in the above example the 2nd 's'-tag), no
# additional 1 is added (because this was already done by the whitespace-node itself when incrementing the variable $dl).
#
# [1]
# Now, what happens, when 2 text-nodes are _not_ separated by a whitespace-node (e.g.: <w>Augen<c>,</c></w>)?
# In this case, the falsely increased from-value has to be decreased again by 1 when closing the enclosing tag
# (see above code fragment '... not exists $ws{ $fval - 1 } ...').
#
# [2]
# Comparing the 2 examples '<w>fu</w> <w>bar</w>' and '<w>fu</w><w> </w><w>bar</w>', ' ' is in both cases handled as a
# whitespace-node (XML_READER_TYPE_SIGNIFICANT_WHITESPACE).
#
# The from-index of the 2nd w-tag in the second example refers to 'bar', which may not have been the intention
# (even though '<w> </w>' doesn't make a lot of sense). TODO: could this be a bug?
#
# Empty tags also cling to the next text-token - e.g. in '<w>tok1</w> <w>tok2</w><a><b/></a> <w>tok3</w>' are the from-
# and to-indices for the tags 'a' and 'b' both 12, which is the start-index of the token 'tok3'.

if( $e->[0] == XML_READER_TYPE_SIGNIFICANT_WHITESPACE ){

# ~ whitespace-node ~

# ~ whitespace related issue ~

$add_one = 0;

$ws{ $dl }++; # state, that this from-index belongs to a whitespace-node
# ('++' doesn't mean a thing here - maybe it could be used for a consistency check)

}else{

# ~ text-node ~

$add_one = 1;
}


# ~ update $data and $dl ~

$data .= $e->[1];

$dl += length( $e->[1] ); # update length of $data


#~~~~~
# until here: text- and whitespace-nodes
#~~~~~


#elsif ( $e->[0] == XML_READER_TYPE_ATTRIBUTE ) # attribute-node
# note: attributes cannot be processed like this ( => use 'XCT_ATTRIBUTE_ARRAY' - see above )


}else{ # not yet handled type

die "ERROR ($0): Not yet handled type (\$e->[0]=".$e->[0].") ... => Aborting\n";
}

} # end: foreach iteration

} # end: sub retr_info
| |
| __END__ |
| |
| =pod |
| |
| =encoding utf8 |
| |
| =head1 NAME |
| |
| tei2korapxml - Conversion of TEI P5 based formats to KorAP-XML |
| |
| =head1 SYNOPSIS |
| |
| cat corpus.i5.xml | tei2korapxml > corpus.korapxml.zip |
| |
| =head1 DESCRIPTION |
| |
| C<tei2korapxml> is a script to convert TEI P5 and |
| L<I5|https://www1.ids-mannheim.de/kl/projekte/korpora/textmodell.html> |
| based documents to the |
| L<KorAP-XML format|https://github.com/KorAP/KorAP-XML-Krill#about-korap-xml>. |
| If no specific input is defined, data is |
| read from C<STDIN>. If no specific output is defined, data is written |
| to C<STDOUT>. |
| |
| This program is usually called from inside another script. |
| |
| =head1 FORMATS |
| |
| =head2 Input restrictions |
| |
| =over 2 |
| |
| =item |
| |
| utf8 encoded |
| |
| =item |
| |
| TEI P5 formatted input with certain restrictions: |
| |
| =over 4 |
| |
| =item |
| |
| B<mandatory>: text-header with integrated textsigle, text-body |
| |
| =item |
| |
| B<optional>: corp-header with integrated corpsigle, |
| doc-header with integrated docsigle |
| |
| =back |
| |
| =item |
| |
all tokens inside the primary text (inside $data) may not be
newline separated, because newlines are removed
(see code section C<~ inside text body ~>) and a conversion of newlines
into blanks between 2 tokens could lead to additional blanks,
where there should be none (e.g.: punctuation characters like C<,> or
C<.> should not be separated from their predecessor token)
(see also code section C<~ whitespace handling ~>).
| |
| =back |
| |
| =head2 Notes on the output |
| |
| =over 2 |
| |
| =item |
| |
| zip file output (default on C<stdout>) with utf8 encoded entries |
| (which together form the KorAP-XML format) |
| |
| =back |
| |
| =head1 INSTALLATION |
| |
| C<tei2korapxml> requires L<libxml2-dev> bindings to build. When |
| these bindings are available, the preferred way to install the script is |
| to use L<cpanm|App::cpanminus>. |
| |
| $ cpanm https://github.com/KorAP/KorAP-XML-TEI.git |
| |
| In case everything went well, the C<tei2korapxml> tool will |
| be available on your command line immediately. |
| |
| Minimum requirement for L<KorAP::XML::TEI> is Perl 5.16. |
| |
| =head1 OPTIONS |
| |
| =over 2 |
| |
| =item B<--root|-r> |
| |
| The root directory for output. Defaults to C<.>. |
| |
| =item B<--help|-h> |
| |
| Print help information. |
| |
| =item B<--version|-v> |
| |
| Print version information. |
| |
| =item B<--tokenizer-call|-tc> |
| |
Call an external tokenizer process that tokenizes
a single line from STDIN and outputs one token per line.
| |
| =item B<--use-intern-tokenization|-ti> |
| |
Tokenize the data using two embedded tokenizers
that take an I<aggressive> and a I<conservative>
approach.
| |
| =item B<--log|-l> |
| |
| Loglevel for I<Log::Any>. Defaults to C<notice>. |
| |
| =back |
| |
| =head1 COPYRIGHT AND LICENSE |
| |
| Copyright (C) 2020, L<IDS Mannheim|https://www.ids-mannheim.de/> |
| |
| Author: Peter Harders |
| |
| Contributors: Marc Kupietz, Carsten Schnober, Nils Diewald |
| |
| L<KorAP::XML::TEI> is developed as part of the L<KorAP|https://korap.ids-mannheim.de/> |
| Corpus Analysis Platform at the |
| L<Leibniz Institute for the German Language (IDS)|http://ids-mannheim.de/>, |
| member of the |
| L<Leibniz-Gemeinschaft|http://www.leibniz-gemeinschaft.de/>. |
| |
| This program is free software published under the |
| L<BSD-2 License|https://raw.githubusercontent.com/KorAP/KorAP-XML-TEI/master/LICENSE>. |
| |
| =cut |