| #!/usr/bin/env perl | 
 | use strict; | 
 | use warnings; | 
 | use POSIX; | 
 | use Log::Any '$log'; | 
 | use Log::Any::Adapter; | 
 | use Pod::Usage; | 
 | use Getopt::Long qw(GetOptions :config no_auto_abbrev); | 
 | use Encode; | 
 |  | 
# Hard limit after which a sentence is forcibly closed, as a safety net
# against texts without sentence-ending punctuation.
my $MAX_SENTENCE_LENGTH=10000;
# Prefix for CoNLL-U comment lines.
my $COMMENT_START="#";

my $test=0;         # if true, only the first few texts are processed (debugging aid)
my $text_no=0;      # text counter, used together with $test
my %opts;           # NOTE(review): appears unused in this file
my %plain_texts;    # docid => primary text extracted from data.xml
my %sentence_ends;  # docid => { end offset => 1 } sentence boundaries from structure.xml

our $VERSION = '0.3.900';

our $VERSION_MSG = "\nkorapxml2conllu - v$VERSION\n";

use constant {
  # Set to 1 for minimal more debug output (no need to be parametrized)
  DEBUG => $ENV{KORAPXMLCONLLU_DEBUG} // 0
};
 |  | 
# Command-line options; user-facing documentation is in the POD below.
GetOptions(
  'sigle-pattern|p=s'            => \(my $sigle_pattern = ''),            # glob pattern restricting which text sigles are converted
  'extract-attributes-regex|e=s' => \(my $extract_attributes_regex = ''), # element/attribute regex whose matches are emitted as comments
  'log|l=s'                      => \(my $log_level = 'warn'),            # Log::Any log level

  'help|h'                       => sub {
    pod2usage(
      -verbose  => 99,
      -sections => 'NAME|DESCRIPTION|SYNOPSIS|ARGUMENTS|OPTIONS|EXAMPLES',
      -msg      => $VERSION_MSG,
      -output   => '-'
    )
  },
  'version|v'                    => sub {
    pod2usage(
      -verbose => 0,
      -msg     => $VERSION_MSG,
      -output  => '-'
    );
  }
);
 |  | 
# Establish logger
binmode(STDERR, ':encoding(UTF-8)');
Log::Any::Adapter->set('Stderr', log_level => $log_level);
$log->notice('Debugging is activated') if DEBUG;

# Global parser state shared between the main loop and the subs below.
my $docid="";                      # id of the text currently being converted
my ($current_id, $current_from, $current_to, $token); # data of the current <span>
my $current;                       # CoNLL-U lines buffered for the current sentence
my ($unknown, $known) = (0, 0);    # token counters for the current sentence
my @current_lines;                 # [from, to] offset pairs of the buffered tokens
my %processedFilenames;            # guards against processing a zip member twice
my $zipsiglepattern = (defined($ENV{ZIPSIGLEPATTERN})? $ENV{ZIPSIGLEPATTERN} : ""); # extra unzip arguments from the environment
my $baseOnly;                      # true if only a base (tokens-only) zip was given
my %extras;                        # docid => { offset => attribute comment lines } (for -e)

# Column indices of a CoNLL-U row.
my ($ID_idx, $FORM_idx, $LEMMA_idx, $UPOS_idx, $XPOS_idx, $FEATS_idx, $HEAD_idx, $DEPREC_idx, $DEPS_idx, $MISC_idx) = (0..9);
 |  | 
 | my $UNZIP = `sh -c 'command -v unzip'`; | 
 | chomp $UNZIP; | 
 |  | 
 |  | 
 | if ($UNZIP eq '') { | 
 |   warn('No unzip executable found in PATH.'); | 
 |   return 0; | 
 | }; | 
 |  | 
# Main loop: convert each zip given on the command line (existing .conllu
# files are passed through unchanged), writing CoNLL-U to STDOUT.
foreach my $morpho_zip (@ARGV) {
  die "cannot open $morpho_zip" if(! -r $morpho_zip);
  my $data_zip = $morpho_zip;
  # CoNLL(-U) input is simply copied to STDOUT.
  if ($data_zip !~ /\.zip/ && $data_zip =~ /\.conllu?/i) {
    # NOTE(review): bareword handle and 2-arg open — a 3-arg lexical open would be safer.
    open(CONLL, "<$data_zip") or die "cannot open $data_zip";
    while(<CONLL>) {
      print;
    }
    close(CONLL);
    next;
  }
  # "corpus.tree_tagger.zip" -> data zip "corpus.zip", foundry "tree_tagger".
  # If the substitution does not match, $data_zip stays equal to $morpho_zip
  # and the base-only branch below sets $foundry to "base".
  $data_zip =~ s/\.([^.]+)\.zip$/.zip/;
  my $foundry = $1;
  die "cannot open data file $data_zip corresponding to $morpho_zip" if(! -r $data_zip);

  my $first=1;              # true until the first text of this zip has been opened
  my @conll = ("_") x 10;   # the CoNLL-U row currently being filled
  my $filename;             # current member file name inside the zip

  $baseOnly = $morpho_zip eq $data_zip;
  # Build the unzip streaming commands for (a) morpho/token annotations and
  # (b) primary text plus (optionally) structure information.
  my ($morphoOrTokenCommand, $plaintextAndStructureCommand);
  if (!$baseOnly) {
    $morphoOrTokenCommand = "$UNZIP -c $morpho_zip '*/${sigle_pattern}*/*/*/morpho.xml' $zipsiglepattern |";
    if ($extract_attributes_regex) {
      # [sd][ta]*.xml matches both data.xml and structure.xml
      $plaintextAndStructureCommand = "$UNZIP -c $data_zip '*/${sigle_pattern}*/*/[sd][ta]*.xml' $zipsiglepattern |";
    } else {
      $plaintextAndStructureCommand = "$UNZIP -c $data_zip '*/${sigle_pattern}*/*/data.xml' $zipsiglepattern |";
    }
  } else {
    $foundry = "base";
    $morphoOrTokenCommand = "$UNZIP -c $morpho_zip '*/${sigle_pattern}*/*/*/tokens.xml' $zipsiglepattern |";
    $plaintextAndStructureCommand = "$UNZIP -c $data_zip '*/${sigle_pattern}*/*/[sd][ta]*.xml' $zipsiglepattern |";
  }

  # NOTE(review): piped open of an interpolated shell command — file names
  # containing shell metacharacters would break this; list-form open would be safer.
  open (MORPHO_OR_TOKENPIPE, $morphoOrTokenCommand) or die "cannot unzip $morpho_zip";
  open (PLAINTEXTPIPE, $plaintextAndStructureCommand) or die "cannot unzip $data_zip";
  print "$COMMENT_START foundry = $foundry\n";
  while (<MORPHO_OR_TOKENPIPE>) {
    if (/^  inflating: (.*)/) {      # unzip -c prints this header before each member
      $filename=$1;
      # Skip members that were already processed (possible with overlapping
      # sigle patterns) by fast-forwarding to the next member header.
      while($processedFilenames{$filename} && !eof(MORPHO_OR_TOKENPIPE)) {
        $log->warn("$filename already processed");
        while (<MORPHO_OR_TOKENPIPE>) {
          last if(/\s+inflating:\s+(.*)/);
        }
        $filename=$1 if(!eof(MORPHO_OR_TOKENPIPE) && /\s+inflating:\s+(.*)/);
      }
    } elsif(m@^\s*<layer\s+.*docid="([^"]+)"@) {  # start of a new text
      last if($test && $text_no++ > 3);           # debugging aid: stop after a few texts
      if(!$first) {
        closeDoc(0);                              # flush the previous text
      }
      $processedFilenames{$filename}=1;
      $docid=$1;
      @current_lines=();
      $known=$unknown=0;
      $current="";
      if ($first) {
          $first = 0;
      }
      if(!fetch_plaintext($docid)) { # skip this text
        while (<MORPHO_OR_TOKENPIPE>) {
          last if(m@</layer>@);
        }
      }
      print STDOUT "$COMMENT_START filename = $filename\n$COMMENT_START text_id = $docid\n";
      $log->debug("Analyzing $docid");
    } elsif (m@^\s*<f\s+.*name="([^"]+)">([^<]+)</f>@) {  # one morpho feature
      if ($1 eq "lemma") {
        $conll[$LEMMA_idx] = $2;
        $conll[$LEMMA_idx] =~ s/[\t\n\r]//g; # make sure that lemmas never contain tabs or newlines
        if($conll[$LEMMA_idx] eq 'UNKNOWN') {
          $conll[$LEMMA_idx] = "--";
          $unknown++;
        } else {
          $known++;
        }
      } elsif ($1 eq 'pos' || $1 eq "ctag") {
        # NOTE(review): $unknown is incremented for every pos/ctag feature,
        # apparently doubling as a token counter — confirm this is intended.
        $unknown++;
        $conll[$XPOS_idx] = $conll[$UPOS_idx] = $2;
      } elsif ($1 eq 'msd') {
        $conll[$FEATS_idx] = $2;
      } elsif ($1 eq 'certainty') {
        $conll[$MISC_idx] = $2;
      }
    } elsif (/<span /) {             # a token span with character offsets
      my $last_from = $current_from // -1;
      ($current_id) = /id="[^0-9]*([^\"]*)"/;
      ($current_from) = /from="([^\"]*)"/;
      ($current_to) = /to="([^\"]*)"/;
      # Emit any -e attribute comments whose offsets fall before this token.
      if($extract_attributes_regex) {
        for (my $i = $last_from + 1; $i <= $current_from; $i++) {
          if ($extras{$docid}{$i}) {
            $current .= $extras{$docid}{$i};
            undef $extras{$docid}{$i};
          }
        }
      }
      $log->debug("found span: $current_id $current_from $current_to");
      # The surface form is cut out of the primary text by character offsets.
      $token = substr($plain_texts{$docid}, $current_from, $current_to - $current_from);
      if (!defined $token) {
        $log->warn("could not retrieve token for $docid at $current_from-$current_to/", length($plain_texts{$docid}), " - ending with: ", substr($plain_texts{$docid},length($plain_texts{$docid})-10));
        $token = "_";
      }
      $token=~s/[\t\n\r]//g; # make sure that tokens never contain tabs or newlines
      @conll = ("_") x 10;
      $conll[$FORM_idx] = encode("utf-8", $token);
      if($baseOnly) {
        # Base zips carry no morpho features: emit the row immediately and
        # rely on sentence boundaries collected from structure.xml.
        my @vals = ($current_from, $current_to);
        $log->debug("joining : ", join(" ", @vals));
        push @current_lines, \@vals;
        $known++;
        $conll[$ID_idx] = $#current_lines+1;
        $current .= join("\t", @conll) . "\n"; # conll columns
        fetch_plaintext($docid);
        if ($sentence_ends{$docid}{$current_to}) {
          $current .= "\n";
          printTokenRanges();
          print STDOUT $current;
          $current = "";
          $known = 0;
          $unknown = 0;
          @current_lines = ();
        }
      }
    } elsif (m@^\s*</fs>@) {   # end of the first morphological reading of a token
      my @vals = ($current_from, $current_to);
      $log->debug("joining : ", join(" ", @vals));
      push @current_lines, \@vals;
      # convert gathered information to CONLL
      $conll[$ID_idx] = $#current_lines+1;
      $current .= join("\t", @conll) . "\n"; # conll columns
      # A sentence ends on sentence punctuation tags ("$." or "SENT" with a
      # literal dot) or when the hard length limit is reached.
      if($conll[$XPOS_idx] eq '$.' || ($conll[$XPOS_idx] eq 'SENT' && $token eq '.') || $known + $unknown >= $MAX_SENTENCE_LENGTH) {
        $current .= "\n";
        if($known + $unknown > 0) { # only print sentence if it contains some words
          printTokenRanges();
          print STDOUT $current;
        }
        $current=""; $known=0; $unknown=0;
        @current_lines = ();
      }
      while (<MORPHO_OR_TOKENPIPE>) {
        last if (m@</span>@);    # only consider first interpretation
      }
    }
  }
  # Flush the last sentence/text of this zip.
  $current .= "\n";
  closeDoc(1);
  close(MORPHO_OR_TOKENPIPE);
  close(PLAINTEXTPIPE);
}
exit;
 |  | 
 | sub printTokenRanges { | 
 |   print "$COMMENT_START start_offsets = ", $current_lines[0]->[0]; | 
 |   foreach my $t (@current_lines) { | 
 |     print STDOUT " $t->[0]"; | 
 |   } | 
 |   print "\n$COMMENT_START end_offsets = ", $current_lines[$#current_lines]->[1]; | 
 |   foreach my $t (@current_lines) { | 
 |     print STDOUT " $t->[1]"; | 
 |   } | 
 |   print "\n"; | 
 | } | 
 |  | 
 | sub closeDoc { | 
 |   $log->debug("closing doc"); | 
 |   if($known + $unknown > 0) { # only parse a sentence if it has some words | 
 |     chomp $current; | 
 |     chomp $current; | 
 |     chomp $current; | 
 |     $current .= "\n\n"; | 
 |     printTokenRanges(); | 
 |     print STDOUT $current; | 
 |   } | 
 | } | 
 |  | 
 | # read data.xml to figure out the tokens | 
 | # (ideally tokens should also be in in morpho.xml, but they are not) | 
 | sub fetch_plaintext { | 
 |   my ($target_id) = @_; | 
 |   my $docid; | 
 |   my $text_started=0; | 
 |   my $text_count = 0; | 
 |   my ($current_id, $current_from, $current_to); | 
 |  | 
 |   if($plain_texts{$target_id} && (!$baseOnly || $sentence_ends{$target_id}{-1})) { | 
 | #    print STDERR "already got $target_id\n"; | 
 |     return 1; | 
 |   } | 
 |   while(<PLAINTEXTPIPE>) { | 
 |     if(/<raw_text[^>]+docid="([^"]*)/) { | 
 |       $docid=$1; | 
 |       $text_started=0; | 
 |     } elsif(/<layer[^>]+docid="([^"]*)/) { | 
 |       $docid=$1; | 
 |       $sentence_ends{$docid}{-1}=1; | 
 |     } elsif(m@<span @) { | 
 |         ($current_id) = /id="[^0-9]*([^\"]*)"/; | 
 |         ($current_from) = /from="([^\"]*)"/; | 
 |         ($current_to) = /to="([^\"]*)"/; | 
 |     } elsif(m@<f\s[^>]*>s</f>@) { | 
 |       $log->debug("Found sentence end for $docid \@$current_to"); | 
 |       $sentence_ends{$docid}{$current_to}=1; | 
 |     } elsif($extract_attributes_regex && m@<f\sname="name"[^>]*>([^<]+)</f>@) { | 
 |       my $current_element = $1; | 
 |       while(<PLAINTEXTPIPE>) { | 
 |         last if(m@</fs>@); | 
 |         if(m@<f\sname="([^"]+)"[^>]*>([^<]+)</f>@) { | 
 |           my $current_node = "$current_element/$1"; | 
 |           my $value = $2; | 
 |           if ($current_node =~ /$extract_attributes_regex/) { | 
 | #            print "# $docid $current_from-$current_to :: $current_node = $value\n"; | 
 |             $extras{$docid}{$current_from} .= "# $current_node = $value\n"; | 
 |           } | 
 |         } | 
 |       } | 
 |     } elsif (m@<text>(.*)</text>@) { | 
 |       $_= decode("utf-8", $1, Encode::FB_DEFAULT); | 
 |       s/</</go; | 
 |       s/>/>/go; | 
 |       s/&/&/go; | 
 |       tr/…•⋅»«ˮ“”„›‹ʼ‘’‚′‐‑‒–—―⁓⁻₋−﹣-/...""""""'''''''-/; | 
 |       $plain_texts{$docid} = $_; | 
 |       last if($text_count++ > 1 && $plain_texts{$target_id}); | 
 |     } elsif (m@<text>(.*)@) { | 
 |       $_= decode("utf-8", $1, Encode::FB_DEFAULT); | 
 |       s/</</go; | 
 |       s/>/>/go; | 
 |       s/&/&/go; | 
 |       tr/…•⋅»«ˮ“”„›‹ʼ‘’‚′‐‑‒–—―⁓⁻₋−﹣-/...""""""'''''''-/; | 
 |       $plain_texts{$docid} = "$_ "; | 
 |       $text_started=1; | 
 |     } elsif ($text_started && m@(.*)</text>@) { | 
 |       $_= decode("utf-8", $1, Encode::FB_DEFAULT); | 
 |       s/</</go; | 
 |       s/>/>/go; | 
 |       s/&/&/go; | 
 |       tr/…•⋅»«ˮ“”„›‹ʼ‘’‚′‐‑‒–—―⁓⁻₋−﹣-/...""""""'''''''-/; | 
 |       $plain_texts{$docid} .= $_; | 
 |       $text_started=0; | 
 |       last if($text_count++ > 1 && $plain_texts{$target_id}); | 
 |     } elsif ($text_started) { | 
 |       chomp; | 
 |       $_ = decode("utf-8", $_, Encode::FB_DEFAULT) . ' '; | 
 |       s/</</go; | 
 |       s/>/>/go; | 
 |       s/&/&/go; | 
 |       tr/…•⋅»«ˮ“”„›‹ʼ‘’‚′‐‑‒–—―⁓⁻₋−﹣-/...""""""'''''''-/; | 
 |       $plain_texts{$docid} .= $_; | 
 |     } | 
 |   } | 
 |   if(defined($ENV{PLAINTEXTFILTER})) { | 
 |     if ($plain_texts{$docid} !~ $ENV{PLAINTEXTFILTER}) { | 
 |       $plain_texts{$docid} = undef; | 
 |       $log->info("Skipping $docid"); | 
 |       return(undef); | 
 |     } else { | 
 |       $log->debug("Using $docid"); | 
 |     } | 
 |   }  | 
 |   return(1); | 
 | } | 
 |  | 
 | =pod | 
 |  | 
 | =encoding utf8 | 
 |  | 
 | =head1 NAME | 
 |  | 
 | korapxml2conllu - Conversion of KorAP-XML zips to CoNLL-U | 
 |  | 
 | =head1 SYNOPSIS | 
 |  | 
 |   korapxml2conllu zca15.tree_tagger.zip > zca15.conllu | 
 |  | 
 | =head1 DESCRIPTION | 
 |  | 
C<korapxml2conllu> is a script to convert L<KorAP-XML format|https://github.com/KorAP/KorAP-XML-Krill#about-korap-xml> base or morpho zips to CoNLL(-U) format, with all information necessary for reconstruction included in comment lines.
 |  | 
 | =head1 INSTALLATION | 
 |  | 
 |   $ cpanm https://github.com/KorAP/KorAP-XML-CoNLL-U.git | 
 |  | 
 | =head1 OPTIONS | 
 |  | 
 | =over 2 | 
 |  | 
 | =item B<--sigle-pattern|-p> | 
 |  | 
 | Convert only texts from the KorAP XML zip files with folder names (i.e. sigles) matching the glob pattern. | 
 |  | 
=item B<--extract-attributes-regex|-e>
 |  | 
 | Extract element/attribute regular expressions to comments. | 
 |  | 
 | =item B<--help|-h> | 
 |  | 
 | Print help information. | 
 |  | 
 | =item B<--version|-v> | 
 |  | 
 | Print version information. | 
 |  | 
 |  | 
 | =item B<--log|-l> | 
 |  | 
 | Loglevel for I<Log::Any>. Defaults to C<warn>. | 
 |  | 
 | =back | 
 |  | 
 | =head1 EXAMPLES | 
 |  | 
 |  korapxml2conllu -e '(posting/id|div/id)' t/data/wdf19.zip | 
 |  | 
 | =head1 COPYRIGHT AND LICENSE | 
 |  | 
 | Copyright (C) 2021, L<IDS Mannheim|https://www.ids-mannheim.de/> | 
 |  | 
 | Author: Marc Kupietz | 
 |  | 
 | Contributors: Nils Diewald | 
 |  | 
L<KorAP::XML::CoNLL-U> is developed as part of the L<KorAP|https://korap.ids-mannheim.de/>
 | Corpus Analysis Platform at the | 
 | L<Leibniz Institute for the German Language (IDS)|http://ids-mannheim.de/>, | 
 | member of the | 
 | L<Leibniz-Gemeinschaft|http://www.leibniz-gemeinschaft.de/>. | 
 |  | 
 | This program is free software published under the | 
 | L<BSD-2 License|https://opensource.org/licenses/BSD-2-Clause>. |