from sys import stdin
import argparse, os
import spacy
from spacy.tokens import Doc
import logging, sys, time, signal
from lib.CoNLL_Annotation import get_token_type
import my_utils.file_utils as fu
from germalemma import GermaLemma

# Dependency parsing safety limits
DEFAULT_PARSE_TIMEOUT = 0.5  # seconds per sentence
DEFAULT_MAX_SENTENCE_LENGTH = 500  # tokens

class TimeoutException(Exception):
    pass

def timeout_handler(signum, frame):
    raise TimeoutException("Dependency parsing timeout")

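# NOTE: The SIGALRM-based timeout below is Unix-only and only works in the
# main thread; on other platforms, parsing would need a different guard
# (e.g. a separate worker process).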
20def safe_dependency_parse(spacy_model, text, timeout=DEFAULT_PARSE_TIMEOUT, max_length=DEFAULT_MAX_SENTENCE_LENGTH):
21 """
22 Safely parse a sentence with timeout and length limits.
23
24 Args:
25 spacy_model: Loaded spaCy model
26 text: Text to parse
27 timeout: Maximum seconds to wait for parsing
28 max_length: Maximum sentence length in tokens
29
30 Returns:
31 tuple: (spacy_doc, success, warning_message)
32 """
33 # Check sentence length
34 if len(text.split()) > max_length:
35 # Process without dependency parsing for long sentences
36 disabled_components = ["ner", "parser"]
37 doc = spacy_model(text, disable=disabled_components)
38 return doc, False, f"Sentence too long ({len(text.split())} tokens > {max_length}), dependency parsing skipped"
39
40 # Set up timeout
41 old_handler = signal.signal(signal.SIGALRM, timeout_handler)
Marc Kupietz6b848db2025-10-30 11:38:59 +010042 signal.setitimer(signal.ITIMER_REAL, timeout)
Marc Kupietz095185b2025-10-27 14:41:43 +010043
44 try:
45 doc = spacy_model(text)
Marc Kupietz6b848db2025-10-30 11:38:59 +010046 signal.setitimer(signal.ITIMER_REAL, 0) # Cancel alarm
Marc Kupietz095185b2025-10-27 14:41:43 +010047 signal.signal(signal.SIGALRM, old_handler)
48 return doc, True, None
49 except TimeoutException:
Marc Kupietz6b848db2025-10-30 11:38:59 +010050 signal.setitimer(signal.ITIMER_REAL, 0) # Cancel alarm
Marc Kupietz095185b2025-10-27 14:41:43 +010051 signal.signal(signal.SIGALRM, old_handler)
52 # Retry without dependency parsing
53 disabled_components = ["ner", "parser"]
54 doc = spacy_model(text, disable=disabled_components)
55 return doc, False, f"Dependency parsing timeout after {timeout}s, processed without dependencies"
56 except Exception as e:
Marc Kupietz6b848db2025-10-30 11:38:59 +010057 signal.setitimer(signal.ITIMER_REAL, 0) # Cancel alarm
Marc Kupietz095185b2025-10-27 14:41:43 +010058 signal.signal(signal.SIGALRM, old_handler)
59 # Retry without dependency parsing
60 disabled_components = ["ner", "parser"]
61 doc = spacy_model(text, disable=disabled_components)
62 return doc, False, f"Dependency parsing error: {str(e)}, processed without dependencies"
63
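
# Illustrative usage (assumes a loaded spaCy pipeline `nlp`):
#   doc, ok, warning = safe_dependency_parse(nlp, "Der Hund bellt .")
#   if not ok:
#       logging.warning(warning)
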
def format_morphological_features(token):
    """
    Extract and format morphological features from a spaCy token for CoNLL-U output.

    Args:
        token: spaCy token object

    Returns:
        str: Formatted morphological features string for the CoNLL-U 5th column.
             Returns "_" if no features are available.
    """
    if not hasattr(token, 'morph') or not token.morph:
        return "_"

    morph_dict = token.morph.to_dict()
    if not morph_dict:
        return "_"

    # Format in CoNLL-U style: Feature=Value|Feature2=Value2
    features = []
    for feature, value in sorted(morph_dict.items()):
        features.append(f"{feature}={value}")

    return "|".join(features)

def format_dependency_relations(doc):
    """
    Extract and format dependency relations from a spaCy doc for CoNLL-U output.

    Args:
        doc: spaCy Doc object

    Returns:
        list: List of tuples (head_id, deprel) for each token
    """
    dependencies = []
    for token in doc:
        # HEAD column: 1-based index of the head token (0 for root)
        if token.dep_ == "ROOT":
            head_id = 0
        else:
            # token.head.i is the 0-based index of the head within the doc
            head_id = token.head.i + 1

        # DEPREL column: dependency relation
        deprel = token.dep_ if token.dep_ else "_"

        dependencies.append((head_id, deprel))

    return dependencies


class WhitespaceTokenizer(object):
    def __init__(self, vocab):
        self.vocab = vocab

    def __call__(self, text):
        words = text.split(' ')
        # Filter out empty strings to avoid spaCy errors
        words = [w for w in words if w]
        # Handle edge case of empty input
        if not words:
            words = ['']
        # All tokens 'own' a subsequent space character in this tokenizer
        spaces = [True] * len(words)
        return Doc(self.vocab, words=words, spaces=spaces)
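
# Illustrative usage:
#   nlp = spacy.load("de_core_news_lg")
#   nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
#   [t.text for t in nlp("Der Hund bellt")]  # -> ["Der", "Hund", "bellt"]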


def get_conll_str(anno_obj, spacy_doc, use_germalemma, use_dependencies):
    # First lines are comments (metadata)
    conll_lines = anno_obj.metadata  # Then we want: [ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC]

    # Get dependency relations if enabled
    dependencies = format_dependency_relations(spacy_doc) if use_dependencies == "True" else None

    for ix, token in enumerate(spacy_doc):
        morph_features = format_morphological_features(token)

        # Get HEAD and DEPREL columns
        if dependencies:
            head_id, deprel = dependencies[ix]
        else:
            head_id, deprel = "_", "_"

        if use_germalemma == "True":
            content = (str(ix+1), token.text, find_germalemma(token.text, token.tag_, token.lemma_), token.pos_, token.tag_, morph_features, str(head_id), deprel, "_", "_")
        else:
            content = (str(ix+1), token.text, token.lemma_, token.pos_, token.tag_, morph_features, str(head_id), deprel, "_", "_")  # Pure spaCy!
        conll_lines.append("\t".join(content))
    return "\n".join(conll_lines)


def find_germalemma(word, pos, spacy_lemma):
    simplify_pos = {"ADJA":"ADJ", "ADJD":"ADJ",
                    "NA":"N", "NE":"N", "NN":"N",
                    "ADV":"ADV", "PAV":"ADV", "PROAV":"ADV", "PAVREL":"ADV", "PWAV":"ADV", "PWAVREL":"ADV",
                    "VAFIN":"V", "VAIMP":"V", "VAINF":"V", "VAPP":"V", "VMFIN":"V", "VMINF":"V",
                    "VMPP":"V", "VVFIN":"V", "VVIMP":"V", "VVINF":"V", "VVIZU":"V", "VVPP":"V"
                    }
    # simplify_pos = {"VERB": "V", "ADV": "ADV", "ADJ": "ADJ", "NOUN":"N", "PROPN": "N"}
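    # GermaLemma handles only the coarse classes N, V, ADJ and ADV; unmapped
    # STTS tags become "UNK", which find_lemma() rejects, triggering the
    # spaCy-lemma fallback below.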
    try:
        return lemmatizer.find_lemma(word, simplify_pos.get(pos, "UNK"))
    except Exception:
        return spacy_lemma


if __name__ == "__main__":
    """
    --- Example Real Data TEST ---

    cat /export/netapp/kupietz/N-GRAMM-STUDIE/conllu/zca18.conllu | python systems/parse_spacy_pipe.py \
        --corpus_name DeReKo_zca18 --comment_str "#" > output_zca18.conll
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--corpus_name", help="Corpus name", default="Corpus")
    parser.add_argument("-sm", "--spacy_model", help="spaCy model containing the pipeline to tag", default="de_core_news_lg")
    parser.add_argument("-gtt", "--gld_token_type", help="CoNLL format of the gold data", default="CoNLLUP_Token")
    parser.add_argument("-ugl", "--use_germalemma", help="Use GermaLemma lemmatizer on top of spaCy", default="True")
    parser.add_argument("-udp", "--use_dependencies", help="Include dependency parsing (adds HEAD/DEPREL columns; set to False for faster processing)", default="True")
    parser.add_argument("-c", "--comment_str", help="Comment prefix used inside the CoNLL file", default="#")
    args = parser.parse_args()

    file_has_next, chunk_ix = True, 0
    CHUNK_SIZE = int(os.getenv("SPACY_CHUNK_SIZE", "20000"))
    SPACY_BATCH = int(os.getenv("SPACY_BATCH_SIZE", "2000"))
    SPACY_PROC = int(os.getenv("SPACY_N_PROCESS", "1"))
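    # These can be tuned via environment variables, e.g. (illustrative):
    #   SPACY_CHUNK_SIZE=5000 SPACY_BATCH_SIZE=500 cat input.conllu | \
    #       python systems/parse_spacy_pipe.py --corpus_name Test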

    # =====================================================================================
    # LOGGING INFO ...
    # =====================================================================================
    logger = logging.getLogger(__name__)
    console_hdlr = logging.StreamHandler(sys.stderr)
    os.makedirs("logs", exist_ok=True)  # Make sure the log directory exists
    file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}.SpaCy.log")

    # Custom format without module name
    formatter = logging.Formatter('%(levelname)s: %(message)s')
    console_hdlr.setFormatter(formatter)
    file_hdlr.setFormatter(formatter)

    logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])

    # Override with environment variables if set (useful for Docker)
    if os.getenv("SPACY_USE_DEPENDENCIES") is not None:
        args.use_dependencies = os.getenv("SPACY_USE_DEPENDENCIES", "True")
        logger.info(f"Using SPACY_USE_DEPENDENCIES environment variable: {args.use_dependencies}")

    if os.getenv("SPACY_USE_GERMALEMMA") is not None:
        args.use_germalemma = os.getenv("SPACY_USE_GERMALEMMA", "True")
        logger.info(f"Using SPACY_USE_GERMALEMMA environment variable: {args.use_germalemma}")

    logger.info(f"Chunking {args.corpus_name} corpus in chunks of {CHUNK_SIZE} sentences")
    logger.info(f"Processing configuration: batch_size={SPACY_BATCH}, n_process={SPACY_PROC}")

    # =====================================================================================
    # POS TAG DOCUMENTS
    # =====================================================================================
    # Configure which components to disable based on the dependency parsing option
    disabled_components = ["ner"]
    if args.use_dependencies != "True":
        disabled_components.append("parser")
        logger.info("Dependency parsing disabled for faster processing")
    else:
        logger.info("Dependency parsing enabled (slower but includes HEAD/DEPREL)")

    spacy_de = spacy.load(args.spacy_model, disable=disabled_components)
    spacy_de.tokenizer = WhitespaceTokenizer(spacy_de.vocab)  # Don't re-tokenize: respect the tokenization of the source CoNLL files!

    # Increase max_length to handle very long sentences (especially when the parser is disabled)
    spacy_de.max_length = 10000000  # 10M characters

    lemmatizer = GermaLemma()

    # Log version information
    logger.info(f"spaCy version: {spacy.__version__}")
    logger.info(f"spaCy model: {args.spacy_model}")
    logger.info(f"spaCy model version: {spacy_de.meta.get('version', 'unknown')}")
    try:
        import germalemma
        logger.info(f"GermaLemma version: {germalemma.__version__}")
    except AttributeError:
        logger.info("GermaLemma version: unknown (no __version__ attribute)")

    # Parse timeout and sentence length limits from environment variables
    parse_timeout = float(os.getenv("SPACY_PARSE_TIMEOUT", str(DEFAULT_PARSE_TIMEOUT)))
    max_sentence_length = int(os.getenv("SPACY_MAX_SENTENCE_LENGTH", str(DEFAULT_MAX_SENTENCE_LENGTH)))

    logger.info(f"Dependency parsing limits: timeout={parse_timeout}s, max_length={max_sentence_length} tokens")

    start = time.time()
    total_processed_sents = 0
    dependency_warnings = 0

    while file_has_next:
        annos, file_has_next = fu.get_file_annos_chunk(stdin, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str, our_foundry="spacy")
        if len(annos) == 0: break
        total_processed_sents += len(annos)

        # Calculate progress statistics
        elapsed_time = time.time() - start
        sents_per_sec = total_processed_sents / elapsed_time if elapsed_time > 0 else 0
        current_time = time.strftime("%Y-%m-%d %H:%M:%S")

        logger.info(f"{current_time} | Processed: {total_processed_sents} sentences | Elapsed: {elapsed_time:.1f}s | Speed: {sents_per_sec:.1f} sents/sec")

        sents = [a.get_sentence() for a in annos]

        # Process sentences individually when dependency parsing is enabled, for timeout protection
        if args.use_dependencies == "True":
            for ix, sent in enumerate(sents):
                doc, dependency_success, warning = safe_dependency_parse(
                    spacy_de, sent, timeout=parse_timeout, max_length=max_sentence_length
                )
                if warning:
                    dependency_warnings += 1
                    logger.warning(f"Sentence {total_processed_sents - len(sents) + ix + 1}: {warning}")

                # Override use_dependencies based on actual parsing success
                actual_use_dependencies = "True" if dependency_success else "False"
                conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma, use_dependencies=actual_use_dependencies)
                print(conll_str + "\n")
        else:
            # Use batch processing for speed when dependencies are disabled.
            # n_process defaults to 1 (see SPACY_N_PROCESS) to avoid multiprocessing deadlocks and memory issues with large files.
            try:
                for ix, doc in enumerate(spacy_de.pipe(sents, batch_size=SPACY_BATCH, n_process=SPACY_PROC)):
                    conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma, use_dependencies=args.use_dependencies)
                    print(conll_str + "\n")
            except Exception as e:
                logger.error(f"Batch processing failed: {str(e)}")
                logger.info("Falling back to individual sentence processing...")
                # Fallback: process sentences individually
                for ix, sent in enumerate(sents):
                    try:
                        doc = spacy_de(sent)
                        conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma, use_dependencies=args.use_dependencies)
                        print(conll_str + "\n")
                    except Exception as sent_error:
                        logger.error(f"Failed to process sentence {total_processed_sents - len(sents) + ix + 1}: {str(sent_error)}")
                        logger.error(f"Sentence preview: {sent[:100]}...")
                        # Output a placeholder to maintain alignment
                        conll_str = get_conll_str(annos[ix], spacy_de("ERROR"), use_germalemma=args.use_germalemma, use_dependencies=args.use_dependencies)
                        print(conll_str + "\n")

    end = time.time()
    total_time = end - start
    final_sents_per_sec = total_processed_sents / total_time if total_time > 0 else 0

    logger.info("=== Processing Complete ===")
    logger.info(f"Total sentences: {total_processed_sents}")
    logger.info(f"Total time: {total_time:.2f}s")
    logger.info(f"Average speed: {final_sents_per_sec:.1f} sents/sec")

    if dependency_warnings > 0:
        logger.info(f"Dependency parsing warnings: {dependency_warnings} sentences processed without dependencies")