Stable testing across datasets
diff --git a/systems/Run_Tree-RNN_Taggers.txt b/systems/Run_Tree-RNN_Taggers.txt
new file mode 100644
index 0000000..e0ea45f
--- /dev/null
+++ b/systems/Run_Tree-RNN_Taggers.txt
@@ -0,0 +1,7 @@
+
+# TreeTagger (the *-notokenize command expects already-tokenized input):
+
+time cmd/tree-tagger-german-notokenize /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu.sep.tok /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.treetagger.parsed.conll
+
+# RNNTagger (also run on the pre-tokenized input):
+time cmd/rnn-tagger-german-notokenize.sh /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu.tok > /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.RNNtagger.parsed.conll
\ No newline at end of file
diff --git a/systems/evaluate.py b/systems/evaluate.py
new file mode 100644
index 0000000..11ffd51
--- /dev/null
+++ b/systems/evaluate.py
@@ -0,0 +1,215 @@
+from lib.CoNLL_Annotation import *
+from collections import Counter, defaultdict
+import pandas as pd
+import numpy as np
+from sklearn.metrics import precision_recall_fscore_support as eval_f1
+from tabulate import tabulate
+import logging, argparse, sys
+from datetime import datetime
+
+
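+# Lemma normalizations for TreeTagger: maps TreeTagger's lemma choices to the
+# convention used in the gold data, for words where the two differ only by
+# lemmatization convention (mostly articles and pronouns). Currently unused;
+# see the commented-out line in eval_lemma() below.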
+tree_tagger_fixes = {
+ "die": "der",
+ "eine": "ein",
+ "dass": "daß",
+ "keine": "kein",
+ "dies": "dieser",
+ "erst": "erster",
+ "andere": "anderer",
+ "alle": "aller",
+ "Sie": "sie",
+ "wir": "uns",
+ "alle": "aller",
+ "wenige": "wenig"
+}
+
+
+def save_evaluated(all_sys, all_gld, out_path, print_gold=True):
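+ """
+ Write a tag catalogue to out_path: when print_gold is True, one row per gold
+ tag with its gold and system counts, followed by the tags that appear only
+ in the system output.
+ """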
+ with open(out_path, "w") as out:
+ if print_gold:
+ out.write(f"ORIGINAL_CORPUS_TAGS\n\nTAG\tGLD_COUNT\tSYS_COUNT\n")
+ for g_tag,g_count in sorted(all_gld.items()):
+ s_count = all_sys.get(g_tag, 0)
+ out.write(f"{g_tag}\t{g_count}\t{s_count}\n")
+
+ out.write("\n\nSYSTEM_ONLY_TAGS\n\nTAG\tG_COUNT\tSYS_COUNT\n")
+ for s_tag,s_count in sorted(all_sys.items()):
+ g_count = all_gld.get(s_tag, 0)
+ if g_count == 0:
+ out.write(f"{s_tag}\t{g_count}\t{s_count}\n")
+
+
+
+def eval_lemma(sys, gld):
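+ """
+ Compare system lemmas against gold lemmas token by token. Returns the gold
+ and predicted lemma sequences, the match and error counts, the list of
+ non-alphanumeric "lemmas" the system merely copied, and the mistakes as
+ (word, gold_lemma, sys_lemma) triples.
+ """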
+ match, err, symbol = 0, 0, []
+ y_gld, y_pred, mistakes = [], [], []
+ for i, gld_tok in enumerate(gld.tokens):
+ # sys_lemma = tree_tagger_fixes.get(sys.tokens[i].lemma, sys.tokens[i].lemma) # Omit TreeTagger "errors" because of article lemma disagreement
+ sys_lemma = sys.tokens[i].lemma
+ y_gld.append(gld_tok.lemma)
+ y_pred.append(sys_lemma)
+ if gld_tok.lemma == sys_lemma:
+ match += 1
+ elif not sys.tokens[i].lemma.isalnum(): # Turku does not lemmatize symbols, it only copies them; e.g. (',', '--', ',') occurred 43642 times
+ symbol.append(sys.tokens[i].lemma)
+ if sys.tokens[i].word == sys.tokens[i].lemma:
+ match += 1
+ else:
+ err += 1
+ else:
+ err += 1
+ mistakes.append((gld_tok.word, gld_tok.lemma, sys.tokens[i].lemma))
+ return y_gld, y_pred, match, err, symbol, mistakes
+
+
+def eval_pos(sys, gld):
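+ """
+ Compare system POS tags against gold tags token by token. Returns the gold
+ and predicted tag sequences, the match count, and the mistakes as
+ (word, gold_tag, sys_tag) triples.
+ """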
+ match, mistakes = 0, []
+ y_gld, y_pred = [], []
+ for i, gld_tok in enumerate(gld.tokens):
+ y_gld.append(gld_tok.pos_tag)
+ y_pred.append(sys.tokens[i].pos_tag)
+ # pos_all_pred[gld_tok.pos_tag] += 1
+ # pos_all_gold[sys.tokens[i].pos_tag] += 1
+ if gld_tok.pos_tag == sys.tokens[i].pos_tag:
+ match += 1
+ elif gld_tok.pos_tag == "$." and sys.tokens[i].pos_tag == "$":
+ match += 1
+ y_pred = y_pred[:-1] + ["$."]
+ else:
+ mistakes.append((gld_tok.word, gld_tok.pos_tag, sys.tokens[i].pos_tag))
+ return y_gld, y_pred, match, mistakes
+
+
+
+if __name__ == "__main__":
+ """
+ EVALUATIONS:
+
+ ********** TIGER CORPUS ALL ************
+
+ python systems/evaluate.py -t Turku --corpus_name Tiger\
+ --sys_file /home/daza/datasets/TIGER_conll/tiger_turku_parsed.conllu \
+ --gld_file /home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09
+
+ python systems/evaluate.py -t SpaCy --corpus_name Tiger\
+ --sys_file /home/daza/datasets/TIGER_conll/tiger_spacy_parsed.conllu \
+ --gld_file /home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09
+
+ python systems/evaluate.py -t RNNTagger --corpus_name Tiger\
+ --sys_file /home/daza/datasets/TIGER_conll/tiger_all.parsed.RNNTagger.conll \
+ --gld_file /home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09
+
+ python systems/evaluate.py -t TreeTagger --corpus_name Tiger\
+ --sys_file /home/daza/datasets/TIGER_conll/tiger_all.parsed.TreeTagger.conll \
+ --gld_file /home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09
+
+ ********** UNIVERSAL DEPENDENCIES TEST-SET ************
+
+ python systems/evaluate.py -t Turku --gld_token_type CoNLLUP_Token --corpus_name DE_GSD\
+ --sys_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu.parsed.0.conllu \
+ --gld_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu
+
+ python systems/evaluate.py -t SpaCyGL --gld_token_type CoNLLUP_Token --corpus_name DE_GSD\
+ --sys_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.parsed.germalemma.conllu \
+ --gld_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu
+
+ python systems/evaluate.py -t SpaCy --gld_token_type CoNLLUP_Token --corpus_name DE_GSD\
+ --sys_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.parsed.conllu \
+ --gld_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu
+
+ python systems/evaluate.py -t RNNTagger --gld_token_type CoNLLUP_Token --corpus_name DE_GSD\
+ --sys_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.RNNtagger.parsed.conll \
+ --gld_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu
+
+ python systems/evaluate.py -t TreeTagger --gld_token_type CoNLLUP_Token --corpus_name DE_GSD\
+ --sys_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.treetagger.parsed.conll \
+ --gld_file /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu
+
+ """
+
+ # =====================================================================================
+ # INPUT PARAMS
+ # =====================================================================================
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-s", "--sys_file", help="System output in CoNLL-U Format", required=True)
+ parser.add_argument("-g", "--gld_file", help="Gold Labels to evaluate in CoNLL-U Format", required=True)
+ parser.add_argument("-t", "--type_sys", help="Which system produced the outputs", default="system")
+ parser.add_argument("-c", "--corpus_name", help="Corpus Name for Gold Labels", required=True)
+ parser.add_argument("-gtt", "--gld_token_type", help="CoNLL Format of the Gold Data", default="CoNLL09_Token")
+ parser.add_argument("-cs", "--comment_str", help="CoNLL Format of comentaries inside the file", default="#")
+ args = parser.parse_args()
+
+ # =====================================================================================
+ # LOGGING INFO ...
+ # =====================================================================================
+ logger = logging.getLogger(__name__)
+ console_hdlr = logging.StreamHandler(sys.stdout)
+ file_hdlr = logging.FileHandler(filename=f"logs/Eval_{args.corpus_name}.{args.type_sys}.log")
+ logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])
+ now_is = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+ logger.info(f"\n\nEvaluating {args.corpus_name} Corpus {now_is}")
+
+ # Read the Original GOLD Annotations [CoNLL09, CoNLLUP]
+ gld_generator = read_conll_generator(args.gld_file, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str)
+ # Read the Annotations Generated by the Automatic Parser [Turku, SpaCy, RNNTagger]
+ if args.type_sys == "RNNTagger":
+ sys_generator = read_conll_generator(args.sys_file, token_class=RNNTagger_Token, comment_str="#")
+ elif args.type_sys == "TreeTagger":
+ sys_generator = read_conll_generator(args.sys_file, token_class=RNNTagger_Token, sent_sep="</S>", comment_str="#")
+ else:
+ sys_generator = read_conll_generator(args.sys_file, token_class=CoNLLUP_Token, comment_str="#")
+
+ lemma_all_match, lemma_all_err, lemma_all_mistakes = 0, 0, []
+ lemma_all_symbols, sys_only_lemmas = [], []
+ pos_all_match, pos_all_err, pos_all_mistakes = 0, 0, []
+ pos_all_pred, pos_all_gld = [], []
+ lemma_all_pred, lemma_all_gld = [], []
+ n_sents = 0
+
+ for i, (s,g) in enumerate(zip(sys_generator, gld_generator)):
+ # print([x.word for x in s.tokens])
+ # print([x.word for x in g.tokens])
+ assert len(s.tokens) == len(g.tokens), f"Token Mismatch! S={len(s.tokens)} G={len(g.tokens)} IX={i+1}"
+ n_sents += 1
+ # Lemmas ...
+ lemma_gld, lemma_pred, lemma_match, lemma_err, lemma_sym, mistakes = eval_lemma(s,g)
+ lemma_all_match += lemma_match
+ lemma_all_err += lemma_err
+ lemma_all_mistakes += mistakes
+ lemma_all_symbols += lemma_sym
+ lemma_all_pred += lemma_pred
+ lemma_all_gld += lemma_gld
+ # POS Tags ...
+ pos_gld, pos_pred, pos_match, pos_mistakes = eval_pos(s, g)
+ pos_all_pred += pos_pred
+ pos_all_gld += pos_gld
+ pos_all_match += pos_match
+ pos_all_err += len(pos_mistakes)
+ pos_all_mistakes += pos_mistakes
+
+ logger.info(f"A total of {n_sents} sentences were analyzed")
+
+ # Lemmas ...
+ logger.info(f"Lemma Matches = {lemma_all_match} || Errors = {lemma_all_err} || Symbol Chars = {len(lemma_all_symbols)}")
+ logger.info(f"Lemma Accuracy = {(lemma_all_match*100/(lemma_all_match + lemma_all_err)):.2f}%\n")
+ lemma_miss_df = pd.DataFrame(lemma_all_mistakes, columns=['Gold_Word', 'Gold_Lemma', 'Sys_Lemma']).value_counts()
+ lemma_miss_df.to_csv(path_or_buf=f"outputs/LemmaErrors.{args.corpus_name}.{args.type_sys}.tsv", sep="\t")
+ save_evaluated(Counter(lemma_all_pred), Counter(lemma_all_gld),
+ f"outputs/Lemma-Catalogue.{args.corpus_name}.{args.type_sys}.txt", print_gold=False)
+
+ # POS Tags ...
+ logger.info(f"POS Matches = {pos_all_match} || Errors = {pos_all_err}")
+ logger.info(f"POS Tagging Accuracy = {(pos_all_match*100/(pos_all_match + pos_all_err)):.2f}%\n")
+ pos_miss_df = pd.DataFrame(pos_all_mistakes, columns=['Gold_Word', 'Gold_POS', 'Sys_POS']).value_counts()
+ pos_miss_df.to_csv(path_or_buf=f"outputs/POS-Errors.{args.corpus_name}.{args.type_sys}.tsv", sep="\t")
+ save_evaluated(Counter(pos_all_pred), Counter(pos_all_gld), f"outputs/POS-Catalogue.{args.corpus_name}.{args.type_sys}.txt")
+
+ ordered_labels = sorted(set(pos_all_gld))
+ p_labels, r_labels, f_labels, support = eval_f1(y_true=pos_all_gld, y_pred=pos_all_pred, labels=ordered_labels, average=None)
+ scores_per_label = zip(ordered_labels, [x*100 for x in p_labels], [x*100 for x in r_labels], [x*100 for x in f_labels])
+ logger.info("\n\n")
+ logger.info(tabulate(scores_per_label, headers=["POS Tag","Precision", "Recall", "F1"], floatfmt=".2f"))
+ p_macro, r_macro, f_macro, _ = eval_f1(y_true=np.array(pos_all_gld), y_pred=np.array(pos_all_pred), average='macro', zero_division=0)
+ logger.info(f"Total Prec = {p_macro*100:.2f}\tRec = {r_macro*100:.2f}\tF1 = {f_macro*100:.2f}")
+
+
\ No newline at end of file
diff --git a/systems/parse_spacy.py b/systems/parse_spacy.py
new file mode 100644
index 0000000..75a1eb5
--- /dev/null
+++ b/systems/parse_spacy.py
@@ -0,0 +1,111 @@
+import argparse
+import spacy
+from spacy.tokens import Doc
+import logging, sys, time
+from lib.CoNLL_Annotation import get_token_type
+import my_utils.file_utils as fu
+from germalemma import GermaLemma
+
+
+class WhitespaceTokenizer(object):
+ def __init__(self, vocab):
+ self.vocab = vocab
+
+ def __call__(self, text):
+ words = text.split(' ')
+ # All tokens 'own' a subsequent space character in this tokenizer
+ spaces = [True] * len(words)
+ return Doc(self.vocab, words=words, spaces=spaces)
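+
+# Usage sketch (this is how __main__ below wires it in): replacing the default
+# tokenizer keeps pre-tokenized input aligned one-to-one with the CoNLL tokens:
+# nlp = spacy.load("de_core_news_lg", disable=["ner", "parser"])
+# nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
+# doc = nlp("Das ist ein Beispiel .") # exactly 5 tokens, split on spaces only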
+
+
+def get_conll_str(spacy_doc, use_germalemma):
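+ """
+ Render one spaCy Doc as CoNLL-U style lines. An illustrative output row
+ (tab-separated, unused fields left as "_"):
+ 1	Häuser	Haus	NOUN	NN	_	_	_	_	_
+ """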
+ conll_lines = [] # We want: [ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC]
+ for ix, token in enumerate(spacy_doc, start=1): # CoNLL-U token IDs are 1-based
+ if use_germalemma == "True":
+ content = (str(ix), token.text, find_germalemma(token.text, token.tag_, token.lemma_), token.pos_, token.tag_, "_", "_", "_", "_", "_")
+ else:
+ content = (str(ix), token.text, token.lemma_, token.pos_, token.tag_, "_", "_", "_", "_", "_") # Pure SpaCy!
+ conll_lines.append("\t".join(content))
+ return "\n".join(conll_lines)
+
+
+# def freeling_lemma_lookup():
+# dicts_path = "/home/daza/Frameworks/FreeLing/data/de/dictionary/entries/"
+
+def find_germalemma(word, pos, spacy_lemma):
+ simplify_pos = {"ADJA":"ADJ", "ADJD":"ADJ",
+ "NA":"N", "NE":"N", "NN":"N",
+ "ADV":"ADV", "PAV":"ADV", "PROAV":"ADV", "PAVREL":"ADV", "PWAV":"ADV", "PWAVREL":"ADV",
+ "VAFIN":"V", "VAIMP":"V", "VAINF":"V", "VAPP":"V", "VMFIN":"V", "VMINF":"V",
+ "VMPP":"V", "VVFIN":"V", "VVIMP":"V", "VVINF":"V", "VVIZU":"V","VVPP":"V"
+ }
+ # simplify_pos = {"VERB": "V", "ADV": "ADV", "ADJ": "ADJ", "NOUN":"N", "PROPN": "N"}
+ try:
+ return lemmatizer.find_lemma(word, simplify_pos.get(pos, "UNK"))
+ except ValueError:
+ # GermaLemma raises ValueError when it finds no lemma; fall back to spaCy's
+ return spacy_lemma
+
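+# Illustrative call (actual result depends on the GermaLemma lexicon):
+# find_germalemma("Häuser", "NN", "häuser") # -> "Haus" ("NN" is simplified to "N")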
+
+if __name__ == "__main__":
+ """
+ EXAMPLE:
+ python systems/parse_spacy.py --corpus_name Tiger \
+ -i /home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09 \
+ -o /home/daza/datasets/TIGER_conll/tiger_spacy_parsed.conllu \
+ -t /home/daza/datasets/TIGER_conll/tiger_all.txt
+
+ python systems/parse_spacy.py --corpus_name DE_GSD --gld_token_type CoNLLUP_Token \
+ -i /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu \
+ -o /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.parsed.germalemma.conllu \
+ -t /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.txt
+ """
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-i", "--input_file", help="Input Corpus", required=True)
+ parser.add_argument("-n", "--corpus_name", help="Corpus Name", default="Corpus")
+ parser.add_argument("-o", "--output_file", help="File where the Predictions will be saved", required=True)
+ parser.add_argument("-t", "--text_file", help="Output Plain Text File", default=None)
+ parser.add_argument("-gtt", "--gld_token_type", help="CoNLL Format of the Gold Data", default="CoNLL09_Token")
+ parser.add_argument("-ugl", "--use_germalemma", help="Use Germalemma lemmatizer on top of SpaCy", default="True")
+ parser.add_argument("-c", "--comment_str", help="CoNLL Format of comentaries inside the file", default="#")
+ args = parser.parse_args()
+
+ file_has_next, chunk_ix = True, 0
+ CHUNK_SIZE = 10000
+
+ # =====================================================================================
+ # LOGGING INFO ...
+ # =====================================================================================
+ logger = logging.getLogger(__name__)
+ console_hdlr = logging.StreamHandler(sys.stdout)
+ file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}.SpaCy.log")
+ logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])
+ logger.info(f"Chunking {args.corpus_name} Corpus in chunks of {CHUNK_SIZE} Sentences")
+
+ # =====================================================================================
+ # POS TAG DOCUMENTS
+ # =====================================================================================
+ spacy_de = spacy.load("de_core_news_lg", disable=["ner", "parser"])
+ spacy_de.tokenizer = WhitespaceTokenizer(spacy_de.vocab) # Do not re-tokenize: keep the tokenization of the source CoNLL files
+ write_out = open(args.output_file, "w")
+ lemmatizer = GermaLemma()
+ if args.text_file: write_plain = open(args.text_file, "w")
+
+ start = time.time()
+ total_processed_sents = 0
+ line_generator = fu.file_generator(args.input_file)
+ while file_has_next:
+ sents, gld, file_has_next = fu.get_file_text_chunk(line_generator, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str)
+ if len(sents) == 0: break
+ total_processed_sents += len(sents)
+ logger.info(f"Already processed {total_processed_sents} sentences...")
+ for doc in spacy_de.pipe(sents, batch_size=1000, n_process=10):
+ conll_str = get_conll_str(doc, use_germalemma=args.use_germalemma)
+ write_out.write(conll_str)
+ write_out.write("\n\n")
+ if args.text_file:
+ write_plain.write(" ".join([x.text for x in doc])+"\n")
+
+ write_out.close()
+ if args.text_file: write_plain.close()
+
+ end = time.time()
+ logger.info(f"Processing {args.corpus_name} took {(end - start):.2f} seconds!")
+
\ No newline at end of file
diff --git a/systems/parse_turku.py b/systems/parse_turku.py
new file mode 100644
index 0000000..7694504
--- /dev/null
+++ b/systems/parse_turku.py
@@ -0,0 +1,62 @@
+# TODO: write a client to make multiple requests to the server!
+import subprocess, json, time
+import requests, glob, logging
+import os.path, sys
+from lib.CoNLL_Annotation import get_token_type
+import my_utils.file_utils as fu
+import argparse
+
+
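+# Reference corpus paths (the input actually comes from --input_file below):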
+TIGER_CORPUS = "/home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09"
+DE_GSD_CORPUS = "/home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu"
+
+
+if __name__ == "__main__":
+
+ """
+ EXECUTE:
+
+ python systems/parse_turku.py --corpus_name DE_GSD --gld_token_type CoNLLUP_Token \
+ -i /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu
+
+ """
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-i", "--input_file", help="Input Corpus", required=True)
+ parser.add_argument("-n", "--corpus_name", help="Corpus Name", default="Corpus")
+ parser.add_argument("-gtt", "--gld_token_type", help="CoNLL Format of the Gold Data", default="CoNLL09_Token")
+ parser.add_argument("-c", "--comment_str", help="CoNLL Format of comentaries inside the file", default="#")
+ args = parser.parse_args()
+
+ file_has_next, chunk_ix = True, 0
+ CHUNK_SIZE = 10000
+
+ # =====================================================================================
+ # LOGGING INFO ...
+ # =====================================================================================
+ logger = logging.getLogger(__name__)
+ console_hdlr = logging.StreamHandler(sys.stdout)
+ file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}_Turku.log")
+ logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])
+ logger.info(f"Chunking TIGER Corpus in chunks of {CHUNK_SIZE} Sentences")
+
+ # =====================================================================================
+ # PROCESS (PARSE) the Input Corpus ...
+ # =====================================================================================
+ start = time.time()
+ total_processed_sents = 0
+ line_generator = fu.file_generator(args.input_file)
+ while file_has_next:
+ raw_text, file_has_next, n_sents = fu.get_file_chunk(line_generator, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str)
+ total_processed_sents += n_sents
+ if len(raw_text) > 0:
+ fu.turku_parse_file(raw_text, args.input_file, chunk_ix)
+ now = time.time()
+ elapsed = (now - start)
+ logger.info(f"Time Elapsed: {elapsed}. Processed {total_processed_sents}. [{total_processed_sents/elapsed} Sents/sec]\n") # Toks/Sec???
+ chunk_ix += 1
+ if chunk_ix == 10: break # process at most 10 chunks (CHUNK_SIZE * 10 sentences) per run
+ end = time.time()
+ logger.info(f"Processing File {args.corpus_name} took {(end - start)} seconds!")
+
+
\ No newline at end of file