Upgrade repo to latest spaCy version (adds a spaCy 3 parsing script)
diff --git a/systems/parse_spacy3.py b/systems/parse_spacy3.py
new file mode 100644
index 0000000..14e5a9f
--- /dev/null
+++ b/systems/parse_spacy3.py
@@ -0,0 +1,136 @@
+import argparse, os
+import spacy
+from spacy.language import Language
+from spacy.tokens import Doc
+import logging, sys, time
+from lib.CoNLL_Annotation import get_token_type
+import my_utils.file_utils as fu
+from germalemma import GermaLemma
+
+
+# Whitespace-only tokenizer that keeps the pre-tokenized CoNLL input intact (one token per space-separated word)
+@Language.factory("my_component")
+class WhitespaceTokenizer:
+ def __init__(self, nlp, name):
+ self.vocab = nlp.vocab
+
+ def __call__(self, text):
+ words = text.split(' ')
+ # All tokens 'own' a subsequent space character in this tokenizer
+ spaces = [True] * len(words)
+ return Doc(self.vocab, words=words, spaces=spaces)
+
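+# A minimal usage sketch of the tokenizer above (illustrative sentence, not from the corpus):
+#   nlp = spacy.blank("de")
+#   doc = WhitespaceTokenizer(nlp, "keep_original_tokens")("Der alte Mann")
+#   assert [t.text for t in doc] == ["Der", "alte", "Mann"]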
+
+def get_conll_str(anno_obj, spacy_doc, use_germalemma):
+	# The first lines are the sentence's metadata comments ...
+	conll_lines = anno_obj.metadata # ... then one row per token: [ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC]
+	for ix, token in enumerate(spacy_doc, start=1): # CoNLL-U token IDs are 1-based
+		if use_germalemma == "True": # argparse delivers the flag as a string
+			content = (str(ix), token.text, find_germalemma(token.text, token.tag_, token.lemma_), token.pos_, token.tag_, "_", "_", "_", "_", "_")
+		else:
+			content = (str(ix), token.text, token.lemma_, token.pos_, token.tag_, "_", "_", "_", "_", "_") # Pure SpaCy!
+		conll_lines.append("\t".join(content))
+	return "\n".join(conll_lines)
+
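+# Illustrative get_conll_str() output for one sentence: the metadata comments from
+# the input, then one tab-separated row per token (the values here are made up):
+#   # sent_id = 1
+#   1	Der	der	DET	ART	_	_	_	_	_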
+
+def find_germalemma(word, pos, spacy_lemma):
+ simplify_pos = {"ADJA":"ADJ", "ADJD":"ADJ",
+ "NA":"N", "NE":"N", "NN":"N",
+ "ADV":"ADV", "PAV":"ADV", "PROAV":"ADV", "PAVREL":"ADV", "PWAV":"ADV", "PWAVREL":"ADV",
+ "VAFIN":"V", "VAIMP":"V", "VAINF":"V", "VAPP":"V", "VMFIN":"V", "VMINF":"V",
+ "VMPP":"V", "VVFIN":"V", "VVIMP":"V", "VVINF":"V", "VVIZU":"V","VVPP":"V"
+ }
+ # simplify_pos = {"VERB": "V", "ADV": "ADV", "ADJ": "ADJ", "NOUN":"N", "PROPN": "N"}
+	try:
+		return lemmatizer.find_lemma(word, simplify_pos.get(pos, "UNK"))
+	except ValueError: # GermaLemma raises ValueError for unsupported POS tags, so fall back to the SpaCy lemma
+		return spacy_lemma
+
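+# Illustrative lookup (example word from the GermaLemma docs): find_germalemma("Feinstaubbelastungen", "NN", "...")
+# maps STTS "NN" -> "N" and returns "Feinstaubbelastung"; an unmapped tag becomes "UNK" and keeps the SpaCy lemma.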
+
+if __name__ == "__main__":
+ """
+ EXAMPLE:
+	--- TIGER New Orthography ---
+ python systems/parse_spacy3.py --corpus_name TigerTestNew \
+ -i /home/daza/datasets/TIGER_conll/data_splits/test/Tiger.NewOrth.test.conll \
+ -o /home/daza/datasets/TIGER_conll/tiger_spacy3_parsed.conllu
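+
+	--- Custom model / gzipped input (illustrative placeholder paths) ---
+	python systems/parse_spacy3.py --corpus_name MyCorpus -sm /path/to/custom_model \
+	-i /path/to/my_corpus.conll.gz -o /path/to/my_corpus_parsed.conllu
+	(all flags are defined in the argparse section below; the paths are placeholders)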
+ """
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-i", "--input_file", help="Input Corpus", required=True)
+ parser.add_argument("-n", "--corpus_name", help="Corpus Name", default="Corpus")
+ parser.add_argument("-o", "--output_file", help="File where the Predictions will be saved", required=True)
+ parser.add_argument("-sm", "--spacy_model", help="Spacy model containing the pipeline to tag", default="de_core_news_sm")
+ parser.add_argument("-gtt", "--gld_token_type", help="CoNLL Format of the Gold Data", default="CoNLLUP_Token")
+ parser.add_argument("-ugl", "--use_germalemma", help="Use Germalemma lemmatizer on top of SpaCy", default="True")
+	parser.add_argument("-c", "--comment_str", help="Character that marks comment lines inside the CoNLL file", default="#")
+ args = parser.parse_args()
+
+ file_has_next, chunk_ix = True, 0
+ CHUNK_SIZE = 1000
+ SPACY_BATCH = 100
+ SPACY_PROC = 4
+
+ # =====================================================================================
+ # LOGGING INFO ...
+ # =====================================================================================
+	logger = logging.getLogger(__name__)
+	console_hdlr = logging.StreamHandler(sys.stdout)
+	os.makedirs("logs", exist_ok=True) # logging.FileHandler fails if the logs/ directory does not exist yet
+	file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}.SpaCy.log")
+	logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])
+ logger.info(f"Chunking {args.corpus_name} Corpus in chunks of {CHUNK_SIZE} Sentences")
+
+ # =====================================================================================
+ # POS TAG DOCUMENTS
+ # =====================================================================================
+
+	# spacy.load() accepts both installed package names and paths to custom trained pipelines
+	try:
+		spacy_de = spacy.load(args.spacy_model, disable=["ner", "parser"])
+	except OSError:
+		print(f"Check if model {args.spacy_model} is a valid SpaCy pipeline or if the path containing the trained model exists!")
+		sys.exit(1)
+	spacy_de.tokenizer = WhitespaceTokenizer(spacy_de, "keep_original_tokens") # We won't re-tokenize, to respect how the source CoNLL files are tokenized!
+
+ write_out = open(args.output_file, "w")
+ lemmatizer = GermaLemma()
+
+	if args.input_file.endswith(".gz"):
+		in_file = fu.expand_file(args.input_file) # Repo helper that expands the gzipped input before parsing
+	else:
+		in_file = args.input_file
+
+ start = time.time()
+ total_processed_sents = 0
+ line_generator = fu.file_generator(in_file)
+ while file_has_next:
+ annos, file_has_next = fu.get_file_annos_chunk(line_generator, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str)
+ if len(annos) == 0: break
+ total_processed_sents += len(annos)
+ logger.info(f"Already processed {total_processed_sents} sentences...")
+ sents = [a.get_sentence() for a in annos]
+ for ix, doc in enumerate(spacy_de.pipe(sents, batch_size=SPACY_BATCH, n_process=SPACY_PROC)):
+ conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma)
+			write_out.write(conll_str)
+			write_out.write("\n\n") # a blank line separates sentences in the CoNLL format
+
+	write_out.close()
+	end = time.time()
+	logger.info(f"Processing {args.corpus_name} took {(end - start)} seconds!")
+
\ No newline at end of file