import argparse
import spacy
from spacy.tokens import Doc
import logging, sys, time
from lib.CoNLL_Annotation import get_token_type
import my_utils.file_utils as fu
from germalemma import GermaLemma


class WhitespaceTokenizer:
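    """Whitespace-only tokenizer: splits on single spaces so that spaCy
    preserves the pre-tokenized text of the source CoNLL files as-is."""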
    def __init__(self, vocab):
        self.vocab = vocab

    def __call__(self, text):
        words = text.split(' ')
        # All tokens 'own' a subsequent space character in this tokenizer
        spaces = [True] * len(words)
        return Doc(self.vocab, words=words, spaces=spaces)


def get_conll_str(anno_obj, spacy_doc, use_germalemma):
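    """Render one annotated sentence as a CoNLL string: metadata comment lines
    first, then one tab-separated 10-column line per token."""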
    # First lines are comments (metadata)
    conll_lines = anno_obj.metadata  # Then we want: [ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC]
    for ix, token in enumerate(spacy_doc):
        if use_germalemma == "True":  # NOTE: the flag arrives as the *string* "True"/"False" straight from argparse
            content = (str(ix), token.text, find_germalemma(token.text, token.tag_, token.lemma_), token.pos_, token.tag_, "_", "_", "_", "_", "_")
        else:
            content = (str(ix), token.text, token.lemma_, token.pos_, token.tag_, "_", "_", "_", "_", "_")  # Pure spaCy!
        conll_lines.append("\t".join(content))
    return "\n".join(conll_lines)


# def freeling_lemma_lookup():
#     dicts_path = "/home/daza/Frameworks/FreeLing/data/de/dictionary/entries/"

def find_germalemma(word, pos, spacy_lemma):
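    """Lemmatize `word` with GermaLemma, mapping its STTS tag `pos` to the
    coarse classes GermaLemma understands (N/V/ADJ/ADV); fall back to the
    spaCy lemma whenever GermaLemma cannot handle the word or tag."""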
    simplify_pos = {"ADJA": "ADJ", "ADJD": "ADJ",
                    "NA": "N", "NE": "N", "NN": "N",
                    "ADV": "ADV", "PAV": "ADV", "PROAV": "ADV", "PAVREL": "ADV", "PWAV": "ADV", "PWAVREL": "ADV",
                    "VAFIN": "V", "VAIMP": "V", "VAINF": "V", "VAPP": "V", "VMFIN": "V", "VMINF": "V",
                    "VMPP": "V", "VVFIN": "V", "VVIMP": "V", "VVINF": "V", "VVIZU": "V", "VVPP": "V"
                    }
    # simplify_pos = {"VERB": "V", "ADV": "ADV", "ADJ": "ADJ", "NOUN": "N", "PROPN": "N"}
    try:
        return lemmatizer.find_lemma(word, simplify_pos.get(pos, "UNK"))
    except Exception:  # e.g. GermaLemma rejects unsupported tags such as the "UNK" fallback
        return spacy_lemma


if __name__ == "__main__":
51 """
52 EXAMPLE:
daza85347472020-11-23 18:43:33 +010053 --- TIGER Classic Orthography ---
dazad7d70752021-01-12 18:17:49 +010054 python systems/parse_spacy.py --corpus_name Tiger --gld_token_type CoNLL09_Token \
dazae3bc92e2020-11-04 11:06:26 +010055 -i /home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09 \
56 -o /home/daza/datasets/TIGER_conll/tiger_spacy_parsed.conllu \
57 -t /home/daza/datasets/TIGER_conll/tiger_all.txt
dazad7d70752021-01-12 18:17:49 +010058
59 python systems/parse_spacy.py --corpus_name TigerOld_test \
60 -i /home/daza/datasets/TIGER_conll/data_splits/test/Tiger.OldOrth.test.conll \
61 -o /home/daza/datasets/TIGER_conll/tiger_spacy_parsed.test.conllu
daza85347472020-11-23 18:43:33 +010062
63 --- TIGER New Orthography ---
dazad7d70752021-01-12 18:17:49 +010064 python systems/parse_spacy.py --corpus_name TigerNew \
daza85347472020-11-23 18:43:33 +010065 -i /home/daza/datasets/TIGER_conll/Tiger.NewOrth.train.conll \
66 -o /home/daza/datasets/TIGER_conll/Tiger.NewOrth.train.spacy_parsed.conllu \
67 -t /home/daza/datasets/TIGER_conll/Tiger.NewOrth.train.txt
dazad7d70752021-01-12 18:17:49 +010068
69 python systems/parse_spacy.py --corpus_name TigerNew_test \
70 -i /home/daza/datasets/TIGER_conll/data_splits/test/Tiger.NewOrth.test.conll \
71 -o /home/daza/datasets/TIGER_conll/Tiger.NewOrth.test.spacy_parsed.conllu
daza85347472020-11-23 18:43:33 +010072
73 --- German GSD Universal Deps ---
dazad7d70752021-01-12 18:17:49 +010074 python systems/parse_spacy.py --corpus_name DE_GSD \
dazae3bc92e2020-11-04 11:06:26 +010075 -i /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu \
76 -o /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.parsed.germalemma.conllu \
daza85347472020-11-23 18:43:33 +010077 -t /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.txt
78
79
80 --- Real Data TEST ---
dazad7d70752021-01-12 18:17:49 +010081 time python systems/parse_spacy.py --corpus_name DeReKo_a00 --comment_str "#" \
daza85347472020-11-23 18:43:33 +010082 -i /export/netapp/kupietz/N-GRAMM-STUDIE/conllu/a00.conllu.gz \
83 -o /export/netapp/kupietz/N-GRAMM-STUDIE/conllu/0_SpaCyParsed/a00.spacy.gl.conllu
84
dazae3bc92e2020-11-04 11:06:26 +010085 """
86
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_file", help="Input Corpus", required=True)
    parser.add_argument("-n", "--corpus_name", help="Corpus Name", default="Corpus")
    parser.add_argument("-o", "--output_file", help="File where the Predictions will be saved", required=True)
    parser.add_argument("-t", "--text_file", help="Output Plain Text File", default=None)
    parser.add_argument("-sm", "--spacy_model", help="spaCy model containing the pipeline to tag", default="de_core_news_lg")
    parser.add_argument("-gtt", "--gld_token_type", help="CoNLL format of the gold data", default="CoNLLUP_Token")
    parser.add_argument("-ugl", "--use_germalemma", help="Use GermaLemma lemmatizer on top of spaCy", default="True")
    parser.add_argument("-c", "--comment_str", help="Prefix that marks comment lines inside the CoNLL file", default="#")
    args = parser.parse_args()

    file_has_next, chunk_ix = True, 0
    CHUNK_SIZE = 20000
    SPACY_BATCH = 2000
    SPACY_PROC = 10
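    # Chunked processing keeps memory bounded: CHUNK_SIZE sentences are read at a
    # time and handed to spaCy in batches of SPACY_BATCH across SPACY_PROC worker
    # processes (see the main loop below).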

    # =====================================================================================
    # LOGGING INFO ...
    # =====================================================================================
    logger = logging.getLogger(__name__)
    console_hdlr = logging.StreamHandler(sys.stdout)
    file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}.SpaCy.log")
    logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])
    logger.info(f"Chunking {args.corpus_name} Corpus in chunks of {CHUNK_SIZE} Sentences")

    # =====================================================================================
    # POS TAG DOCUMENTS
    # =====================================================================================
    spacy_de = spacy.load(args.spacy_model, disable=["ner", "parser"])
    spacy_de.tokenizer = WhitespaceTokenizer(spacy_de.vocab)  # Don't re-tokenize: respect the tokenization of the source CoNLL files!
    write_out = open(args.output_file, "w")
    lemmatizer = GermaLemma()
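    # GermaLemma is instantiated once here and used as a module-level global by
    # find_germalemma() above.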
    if args.text_file: write_plain = open(args.text_file, "w")

    if args.input_file.endswith(".gz"):
        in_file = fu.expand_file(args.input_file)
    else:
        in_file = args.input_file

    start = time.time()
    total_processed_sents = 0
    line_generator = fu.file_generator(in_file)
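    # Stream the corpus chunk by chunk so the whole file never has to fit in
    # memory: read CHUNK_SIZE sentence annotations, tag them with spaCy's
    # multiprocess pipe, and append the CoNLL output immediately.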
    while file_has_next:
        annos, file_has_next = fu.get_file_annos_chunk(line_generator, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str)
        if len(annos) == 0: break
        total_processed_sents += len(annos)
        logger.info(f"Already processed {total_processed_sents} sentences...")
        sents = [a.get_sentence() for a in annos]
        for ix, doc in enumerate(spacy_de.pipe(sents, batch_size=SPACY_BATCH, n_process=SPACY_PROC)):
            conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma)
            write_out.write(conll_str)
            write_out.write("\n\n")
            if args.text_file:
                write_plain.write(" ".join([x.text for x in doc]) + "\n")

    write_out.close()
    if args.text_file: write_plain.close()

    end = time.time()
    logger.info(f"Processing {args.corpus_name} took {(end - start)} seconds!")