import argparse
import logging
import sys
import time
from sys import stdin

import spacy
from spacy.tokens import Doc
from germalemma import GermaLemma

from lib.CoNLL_Annotation import get_token_type
import my_utils.file_utils as fu


class WhitespaceTokenizer(object):
    def __init__(self, vocab):
        self.vocab = vocab

    def __call__(self, text):
        words = text.split(' ')
        # All tokens 'own' a subsequent space character in this tokenizer
        spaces = [True] * len(words)
        return Doc(self.vocab, words=words, spaces=spaces)
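
# Minimal usage sketch for the tokenizer above (illustrative only; the input
# sentence is a hypothetical example):
#   nlp = spacy.load("de_core_news_lg", disable=["ner", "parser"])
#   nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
#   doc = nlp("Das ist ein Beispiel .")  # 5 tokens, split only on single spaces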


def get_conll_str(anno_obj, spacy_doc, use_germalemma):
    # First lines are comments (sentence metadata); then one row per token:
    # [ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC]
    conll_lines = list(anno_obj.metadata)  # copy, so the annotation object is not mutated
    for ix, token in enumerate(spacy_doc, start=1):  # CoNLL-U token IDs are 1-based
        if use_germalemma == "True":  # argparse hands the flag through as a string
            content = (str(ix), token.text, find_germalemma(token.text, token.tag_, token.lemma_), token.pos_, token.tag_, "_", "_", "_", "_", "_")
        else:
            content = (str(ix), token.text, token.lemma_, token.pos_, token.tag_, "_", "_", "_", "_", "_")  # pure spaCy lemma
        conll_lines.append("\t".join(content))
    return "\n".join(conll_lines)
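
# For illustration, one token row produced above (tab-separated, made-up values):
#   "1\tHäuser\tHaus\tNOUN\tNN\t_\t_\t_\t_\t_"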


def find_germalemma(word, pos, spacy_lemma):
    # Map STTS tags (token.tag_) to the coarse classes GermaLemma understands
    simplify_pos = {"ADJA": "ADJ", "ADJD": "ADJ",
                    "NA": "N", "NE": "N", "NN": "N",
                    "ADV": "ADV", "PAV": "ADV", "PROAV": "ADV", "PAVREL": "ADV", "PWAV": "ADV", "PWAVREL": "ADV",
                    "VAFIN": "V", "VAIMP": "V", "VAINF": "V", "VAPP": "V", "VMFIN": "V", "VMINF": "V",
                    "VMPP": "V", "VVFIN": "V", "VVIMP": "V", "VVINF": "V", "VVIZU": "V", "VVPP": "V"}
    # Alternative mapping when working with UPOS (token.pos_) instead:
    # simplify_pos = {"VERB": "V", "ADV": "ADV", "ADJ": "ADJ", "NOUN": "N", "PROPN": "N"}
    try:
        return lemmatizer.find_lemma(word, simplify_pos.get(pos, "UNK"))
    except ValueError:
        # GermaLemma only covers N, V, ADJ and ADV; fall back to spaCy's lemma
        return spacy_lemma
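
# Hedged example of the GermaLemma lookup above (as in the germalemma README;
# exact output may differ by version):
#   lemmatizer.find_lemma("Feinstaubbelastungen", "N")  # -> "Feinstaubbelastung"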


if __name__ == "__main__":
    """
    --- Example Real Data TEST ---

    cat /export/netapp/kupietz/N-GRAMM-STUDIE/conllu/zca18.conllu | python systems/parse_spacy_pipe.py \
        --corpus_name DeReKo_zca18 --comment_str "#" > output_zca18.conll
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--corpus_name", help="Corpus name", default="Corpus")
    parser.add_argument("-sm", "--spacy_model", help="spaCy model containing the tagging pipeline", default="de_core_news_lg")
    parser.add_argument("-gtt", "--gld_token_type", help="CoNLL format of the gold data", default="CoNLLUP_Token")
    parser.add_argument("-ugl", "--use_germalemma", help="Use GermaLemma lemmatizer on top of spaCy", default="True")
    parser.add_argument("-c", "--comment_str", help="Prefix that marks comment lines inside the CoNLL file", default="#")
    args = parser.parse_args()

    file_has_next, chunk_ix = True, 0
    CHUNK_SIZE = 20000
    SPACY_BATCH = 2000
    SPACY_PROC = 10
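
    # Chunking parameters: CHUNK_SIZE sentences are read from stdin at a time,
    # and each chunk is fed to nlp.pipe() in batches of SPACY_BATCH sentences
    # distributed over SPACY_PROC worker processes (see the loop below).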

    # =====================================================================================
    # LOGGING INFO ...
    # =====================================================================================
    # Log to stderr and a file; stdout is reserved for the CoNLL output
    logger = logging.getLogger(__name__)
    console_hdlr = logging.StreamHandler(sys.stderr)
    file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}.SpaCy.log")
    logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])
    logger.info(f"Chunking {args.corpus_name} corpus into chunks of {CHUNK_SIZE} sentences")

    # =====================================================================================
    # POS TAG DOCUMENTS
    # =====================================================================================
    spacy_de = spacy.load(args.spacy_model, disable=["ner", "parser"])
    spacy_de.tokenizer = WhitespaceTokenizer(spacy_de.vocab)  # Don't re-tokenize: respect the tokenization of the source CoNLL files!
    lemmatizer = GermaLemma()
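
    # Note: with "ner" and "parser" disabled, only the tagger (and lemmatizer)
    # runs, which is all get_conll_str() needs and makes nlp.pipe() faster.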

    start = time.time()
    total_processed_sents = 0

    while file_has_next:
        annos, file_has_next = fu.get_file_annos_chunk(stdin, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str, our_foundry="spacy")
        if len(annos) == 0:
            break
        total_processed_sents += len(annos)
        logger.info(f"Already processed {total_processed_sents} sentences...")
        sents = [a.get_sentence() for a in annos]
        # nlp.pipe() preserves input order even with multiprocessing, so
        # annos[ix] still lines up with its tagged doc
        for ix, doc in enumerate(spacy_de.pipe(sents, batch_size=SPACY_BATCH, n_process=SPACY_PROC)):
            conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma)
            print(conll_str + "\n")  # print() adds its own newline, leaving the blank line that separates CoNLL sentences

    end = time.time()
    logger.info(f"Processing {args.corpus_name} took {end - start:.2f} seconds!")