Stable testing across datasets
diff --git a/systems/parse_spacy.py b/systems/parse_spacy.py
new file mode 100644
index 0000000..75a1eb5
--- /dev/null
+++ b/systems/parse_spacy.py
@@ -0,0 +1,119 @@
+import argparse
+import spacy
+from spacy.tokens import Doc
+import logging, sys, time
+from lib.CoNLL_Annotation import get_token_type
+import my_utils.file_utils as fu
+from germalemma import GermaLemma
+
+
+class WhitespaceTokenizer(object):
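+    """Dummy tokenizer that only splits on single spaces, preserving the corpus' existing tokenization."""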
+ def __init__(self, vocab):
+ self.vocab = vocab
+
+ def __call__(self, text):
+ words = text.split(' ')
+ # All tokens 'own' a subsequent space character in this tokenizer
+ spaces = [True] * len(words)
+ return Doc(self.vocab, words=words, spaces=spaces)
+
+
+def get_conll_str(spacy_doc, use_germalemma):
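+    """Serialize a parsed SpaCy Doc as 10-column, tab-separated CoNLL-style lines."""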
+ conll_lines = [] # We want: [ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC]
+    for ix, token in enumerate(spacy_doc, start=1):  # CoNLL token IDs are 1-based
+        if use_germalemma == "True":  # argparse delivers the flag as a string
+            content = (str(ix), token.text, find_germalemma(token.text, token.tag_, token.lemma_), token.pos_, token.tag_, "_", "_", "_", "_", "_")
+        else:
+            content = (str(ix), token.text, token.lemma_, token.pos_, token.tag_, "_", "_", "_", "_", "_")  # Pure SpaCy!
+ conll_lines.append("\t".join(content))
+ return "\n".join(conll_lines)
+
+
+# def freeling_lemma_lookup():
+# dicts_path = "/home/daza/Frameworks/FreeLing/data/de/dictionary/entries/"
+
+def find_germalemma(word, pos, spacy_lemma):
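+    # Map fine-grained STTS tags onto the coarse categories GermaLemma supports (ADJ, ADV, N, V)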
+ simplify_pos = {"ADJA":"ADJ", "ADJD":"ADJ",
+ "NA":"N", "NE":"N", "NN":"N",
+ "ADV":"ADV", "PAV":"ADV", "PROAV":"ADV", "PAVREL":"ADV", "PWAV":"ADV", "PWAVREL":"ADV",
+ "VAFIN":"V", "VAIMP":"V", "VAINF":"V", "VAPP":"V", "VMFIN":"V", "VMINF":"V",
+ "VMPP":"V", "VVFIN":"V", "VVIMP":"V", "VVINF":"V", "VVIZU":"V","VVPP":"V"
+ }
+ # simplify_pos = {"VERB": "V", "ADV": "ADV", "ADJ": "ADJ", "NOUN":"N", "PROPN": "N"}
+    try:
+        return lemmatizer.find_lemma(word, simplify_pos.get(pos, "UNK"))
+    except ValueError:  # raised by GermaLemma for unknown words or unsupported POS; keep SpaCy's lemma
+        return spacy_lemma
+
+
+if __name__ == "__main__":
+ """
+ EXAMPLE:
+ python systems/parse_spacy.py --corpus_name Tiger \
+ -i /home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09 \
+ -o /home/daza/datasets/TIGER_conll/tiger_spacy_parsed.conllu \
+ -t /home/daza/datasets/TIGER_conll/tiger_all.txt
+
+ python systems/parse_spacy.py --corpus_name DE_GSD --gld_token_type CoNLLUP_Token \
+ -i /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu \
+ -o /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.parsed.germalemma.conllu \
+    -t /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.txt
+ """
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-i", "--input_file", help="Input Corpus", required=True)
+ parser.add_argument("-n", "--corpus_name", help="Corpus Name", default="Corpus")
+ parser.add_argument("-o", "--output_file", help="File where the Predictions will be saved", required=True)
+ parser.add_argument("-t", "--text_file", help="Output Plain Text File", default=None)
+ parser.add_argument("-gtt", "--gld_token_type", help="CoNLL Format of the Gold Data", default="CoNLL09_Token")
+ parser.add_argument("-ugl", "--use_germalemma", help="Use Germalemma lemmatizer on top of SpaCy", default="True")
+    parser.add_argument("-c", "--comment_str", help="Character that marks comment lines inside the CoNLL file", default="#")
+ args = parser.parse_args()
+
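+    # Stream the input in fixed-size chunks so large corpora never need to fit in memory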
+ file_has_next, chunk_ix = True, 0
+ CHUNK_SIZE = 10000
+
+ # =====================================================================================
+ # LOGGING INFO ...
+ # =====================================================================================
+ logger = logging.getLogger(__name__)
+ console_hdlr = logging.StreamHandler(sys.stdout)
+ file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}.SpaCy.log")
+ logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])
+    logger.info(f"Processing the {args.corpus_name} corpus in chunks of {CHUNK_SIZE} sentences")
+
+ # =====================================================================================
+ # POS TAG DOCUMENTS
+ # =====================================================================================
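+    # Load the large German model with NER and dependency parsing disabled; only tagging and lemmas are needed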
+ spacy_de = spacy.load("de_core_news_lg", disable=["ner", "parser"])
+    spacy_de.tokenizer = WhitespaceTokenizer(spacy_de.vocab)  # Don't re-tokenize; respect how the source CoNLL files are tokenized!
+ write_out = open(args.output_file, "w")
+ lemmatizer = GermaLemma()
+ if args.text_file: write_plain = open(args.text_file, "w")
+
+ start = time.time()
+ total_processed_sents = 0
+ line_generator = fu.file_generator(args.input_file)
+ while file_has_next:
+ sents, gld, file_has_next = fu.get_file_text_chunk(line_generator, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str)
+ if len(sents) == 0: break
+ total_processed_sents += len(sents)
+ logger.info(f"Already processed {total_processed_sents} sentences...")
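+        # batch_size and n_process control SpaCy's batching and multiprocessing; tune them to the host machine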
+ for doc in spacy_de.pipe(sents, batch_size=1000, n_process=10):
+ conll_str = get_conll_str(doc, use_germalemma=args.use_germalemma)
+ write_out.write(conll_str)
+ write_out.write("\n\n")
+ if args.text_file:
+ write_plain.write(" ".join([x.text for x in doc])+"\n")
+
+ end = time.time()
+ logger.info(f"Processing {args.corpus_name} took {(end - start)} seconds!")
+    write_out.close()
+    if args.text_file: write_plain.close()
+
\ No newline at end of file