from sys import stdin
import argparse, os
import spacy
from spacy.tokens import Doc
import logging, sys, time, signal
from lib.CoNLL_Annotation import get_token_type
import my_utils.file_utils as fu

# Try to import GermaLemma, but make it optional
try:
    from germalemma import GermaLemma
    GERMALEMMA_AVAILABLE = True
except ImportError:
    GERMALEMMA_AVAILABLE = False
    GermaLemma = None

# Dependency parsing safety limits
DEFAULT_PARSE_TIMEOUT = 0.5  # seconds per sentence
DEFAULT_MAX_SENTENCE_LENGTH = 500  # tokens

class TimeoutException(Exception):
    pass

def timeout_handler(signum, frame):
    raise TimeoutException("Dependency parsing timeout")

def safe_dependency_parse(spacy_model, text, timeout=DEFAULT_PARSE_TIMEOUT, max_length=DEFAULT_MAX_SENTENCE_LENGTH):
    """
    Safely parse a sentence with timeout and length limits.

    Args:
        spacy_model: Loaded spaCy model
        text: Text to parse
        timeout: Maximum seconds to wait for parsing
        max_length: Maximum sentence length in tokens

    Returns:
        tuple: (spacy_doc, success, warning_message)
    """
    # Check sentence length
    if len(text.split()) > max_length:
        # Process without dependency parsing for long sentences
        disabled_components = ["ner", "parser"]
        doc = spacy_model(text, disable=disabled_components)
        return doc, False, f"Sentence too long ({len(text.split())} tokens > {max_length}), dependency parsing skipped"

    # Set up timeout
    old_handler = signal.signal(signal.SIGALRM, timeout_handler)
    signal.setitimer(signal.ITIMER_REAL, timeout)

    try:
        doc = spacy_model(text)
        signal.setitimer(signal.ITIMER_REAL, 0)  # Cancel alarm
        signal.signal(signal.SIGALRM, old_handler)
        return doc, True, None
    except TimeoutException:
        signal.setitimer(signal.ITIMER_REAL, 0)  # Cancel alarm
        signal.signal(signal.SIGALRM, old_handler)
        # Retry without dependency parsing
        disabled_components = ["ner", "parser"]
        doc = spacy_model(text, disable=disabled_components)
        return doc, False, f"Dependency parsing timeout after {timeout}s, processed without dependencies"
    except Exception as e:
        signal.setitimer(signal.ITIMER_REAL, 0)  # Cancel alarm
        signal.signal(signal.SIGALRM, old_handler)
        # Retry without dependency parsing
        disabled_components = ["ner", "parser"]
        doc = spacy_model(text, disable=disabled_components)
        return doc, False, f"Dependency parsing error: {str(e)}, processed without dependencies"

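# Usage sketch (illustrative, assuming a loaded German pipeline; the sentence is made up):
#   nlp = spacy.load("de_core_news_lg")
#   doc, ok, warning = safe_dependency_parse(nlp, "Das ist ein Beispielsatz .")
#   # ok is False (and warning is set) when the sentence exceeded max_length or parsing timed out
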
def format_morphological_features(token):
    """
    Extract and format morphological features from a spaCy token for CoNLL-U output.

    Args:
        token: spaCy token object

    Returns:
        str: Formatted morphological features string for the CoNLL-U FEATS (6th) column
             Returns "_" if no features are available
    """
    if not hasattr(token, 'morph') or not token.morph:
        return "_"

    morph_dict = token.morph.to_dict()
    if not morph_dict:
        return "_"

    # Format in CoNLL-U style: Feature=Value|Feature2=Value2
    features = []
    for feature, value in sorted(morph_dict.items()):
        features.append(f"{feature}={value}")

    return "|".join(features)

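# Example output (illustrative): for a token such as "Hauses" this might return
# "Case=Gen|Gender=Neut|Number=Sing"; tokens without morphological features yield "_".
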
def format_dependency_relations(doc):
    """
    Extract and format dependency relations from a spaCy doc for CoNLL-U output.

    Args:
        doc: spaCy Doc object

    Returns:
        list: List of tuples (head_id, deprel) for each token
    """
    dependencies = []
    for token in doc:
        # HEAD column: 1-based index of the head token (0 for root)
        if token.dep_ == "ROOT":
            head_id = 0
        else:
            # Find the 1-based index of the head token
            head_id = None
            for j, potential_head in enumerate(doc):
                if potential_head == token.head:
                    head_id = j + 1
                    break
            if head_id is None:
                head_id = 0  # Fallback to root if head not found

        # DEPREL column: dependency relation
        deprel = token.dep_ if token.dep_ else "_"

        dependencies.append((head_id, deprel))

    return dependencies

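# Example (illustrative): for a parsed "Der Hund schläft ." this returns one
# (head_id, deprel) pair per token, e.g. [(2, "det"), (3, "nsubj"), (0, "ROOT"), (3, "punct")],
# with 0 marking the sentence root.
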
class WhitespaceTokenizer(object):
    def __init__(self, vocab):
        self.vocab = vocab

    def __call__(self, text):
        words = text.split(' ')
        # Filter out empty strings to avoid spaCy errors
        words = [w for w in words if w]
        # Handle edge case of empty input - use a placeholder token
        if not words:
            words = ['_EMPTY_']
        # All tokens 'own' a subsequent space character in this tokenizer
        spaces = [True] * len(words)
        return Doc(self.vocab, words=words, spaces=spaces)

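# Usage sketch (illustrative): plugging this tokenizer into a pipeline keeps pre-tokenized
# CoNLL input intact, splitting only on single spaces:
#   nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
#   nlp("Das ist ein Test .")  # exactly 5 tokens, no re-tokenization
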
def get_conll_str(anno_obj, spacy_doc, use_germalemma, use_dependencies):
    # First lines are comments (metadata)
    conll_lines = anno_obj.metadata  # Then we want: [ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC]

    # Get dependency relations if enabled
    dependencies = format_dependency_relations(spacy_doc) if use_dependencies == "True" else None

    for ix, token in enumerate(spacy_doc):
        morph_features = format_morphological_features(token)

        # Get HEAD and DEPREL columns
        if dependencies:
            head_id, deprel = dependencies[ix]
        else:
            head_id, deprel = "_", "_"

        if use_germalemma == "True":
            content = (str(ix+1), token.text, find_germalemma(token.text, token.tag_, token.lemma_), token.pos_, token.tag_, morph_features, str(head_id), deprel, "_", "_")
        else:
            content = (str(ix+1), token.text, token.lemma_, token.pos_, token.tag_, morph_features, str(head_id), deprel, "_", "_")  # Pure spaCy!
        conll_lines.append("\t".join(content))
    return "\n".join(conll_lines)

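# Example output line (illustrative; tab-separated CoNLL-U columns
# ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC):
#   3	schläft	schlafen	VERB	VVFIN	Mood=Ind|Number=Sing|Person=3|Tense=Pres	0	ROOT	_	_
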
def find_germalemma(word, pos, spacy_lemma):
    simplify_pos = {"ADJA": "ADJ", "ADJD": "ADJ",
                    "NA": "N", "NE": "N", "NN": "N",
                    "ADV": "ADV", "PAV": "ADV", "PROAV": "ADV", "PAVREL": "ADV", "PWAV": "ADV", "PWAVREL": "ADV",
                    "VAFIN": "V", "VAIMP": "V", "VAINF": "V", "VAPP": "V", "VMFIN": "V", "VMINF": "V",
                    "VMPP": "V", "VVFIN": "V", "VVIMP": "V", "VVINF": "V", "VVIZU": "V", "VVPP": "V"
                    }
    # simplify_pos = {"VERB": "V", "ADV": "ADV", "ADJ": "ADJ", "NOUN": "N", "PROPN": "N"}
    try:
        return lemmatizer.find_lemma(word, simplify_pos.get(pos, "UNK"))
    except Exception:
        return spacy_lemma

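# Example (illustrative): find_germalemma("Häuser", "NN", "Häuser") maps the STTS tag "NN"
# to GermaLemma's "N" class and should yield "Haus"; if GermaLemma cannot handle the
# word/POS combination, the spaCy lemma passed in is returned instead.
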
if __name__ == "__main__":
    """
    --- Example Real Data TEST ---

    cat /export/netapp/kupietz/N-GRAMM-STUDIE/conllu/zca18.conllu | python systems/parse_spacy_pipe.py \
        --corpus_name DeReKo_zca18 --comment_str "#" > output_zca18.conll
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--corpus_name", help="Corpus name", default="Corpus")
    parser.add_argument("-sm", "--spacy_model", help="spaCy model containing the pipeline to tag", default="de_core_news_lg")
    parser.add_argument("-gtt", "--gld_token_type", help="CoNLL format of the gold data", default="CoNLLUP_Token")
    parser.add_argument("-ugl", "--use_germalemma", help="Use GermaLemma lemmatizer on top of spaCy", default="True")
    parser.add_argument("-udp", "--use_dependencies", help="Include dependency parsing (adds HEAD/DEPREL columns; set to False for faster processing)", default="True")
    parser.add_argument("-c", "--comment_str", help="CoNLL format of comment lines inside the file", default="#")
    args = parser.parse_args()

    file_has_next, chunk_ix = True, 0
    CHUNK_SIZE = int(os.getenv("SPACY_CHUNK_SIZE", "20000"))
    SPACY_BATCH = int(os.getenv("SPACY_BATCH_SIZE", "2000"))
    SPACY_PROC = int(os.getenv("SPACY_N_PROCESS", "1"))
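    # Chunking and batching can also be tuned via the environment (values and file names
    # below are illustrative):
    #   SPACY_CHUNK_SIZE=5000 SPACY_BATCH_SIZE=500 cat input.conllu | \
    #       python systems/parse_spacy_pipe.py --corpus_name Test > output.conll
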

    # =====================================================================================
    # LOGGING INFO ...
    # =====================================================================================
    logger = logging.getLogger(__name__)
    console_hdlr = logging.StreamHandler(sys.stderr)
    file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}.SpaCy.log")

    # Custom format without module name
    formatter = logging.Formatter('%(levelname)s: %(message)s')
    console_hdlr.setFormatter(formatter)
    file_hdlr.setFormatter(formatter)

    logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])

    # Override with environment variables if set (useful for Docker)
    if os.getenv("SPACY_USE_DEPENDENCIES") is not None:
        args.use_dependencies = os.getenv("SPACY_USE_DEPENDENCIES", "True")
        logger.info(f"Using SPACY_USE_DEPENDENCIES environment variable: {args.use_dependencies}")

    if os.getenv("SPACY_USE_GERMALEMMA") is not None:
        args.use_germalemma = os.getenv("SPACY_USE_GERMALEMMA", "True")
        logger.info(f"Using SPACY_USE_GERMALEMMA environment variable: {args.use_germalemma}")

    logger.info(f"Chunking {args.corpus_name} Corpus in chunks of {CHUNK_SIZE} Sentences")
    logger.info(f"Processing configuration: batch_size={SPACY_BATCH}, n_process={SPACY_PROC}")

    # =====================================================================================
    # POS TAG DOCUMENTS
    # =====================================================================================
    # Configure which components to disable based on dependency parsing option
    disabled_components = ["ner"]
    if args.use_dependencies != "True":
        disabled_components.append("parser")
        logger.info("Dependency parsing disabled for faster processing")
    else:
        logger.info("Dependency parsing enabled (slower but includes HEAD/DEPREL)")

    spacy_de = spacy.load(args.spacy_model, disable=disabled_components)
    spacy_de.tokenizer = WhitespaceTokenizer(spacy_de.vocab)  # We won't re-tokenize, to respect how the source CoNLL files are tokenized!

    # Increase max_length to handle very long sentences (especially when the parser is disabled)
    spacy_de.max_length = 10000000  # 10M characters

    # Initialize GermaLemma if available and requested
    lemmatizer = None
    if args.use_germalemma == "True":
        if GERMALEMMA_AVAILABLE:
            lemmatizer = GermaLemma()
        else:
            logger.warning("GermaLemma requested but not available. Using spaCy lemmatizer instead.")
            args.use_germalemma = "False"

    # Log version information
    logger.info(f"spaCy version: {spacy.__version__}")
    logger.info(f"spaCy model: {args.spacy_model}")
    logger.info(f"spaCy model version: {spacy_de.meta.get('version', 'unknown')}")
    if GERMALEMMA_AVAILABLE:
        try:
            import germalemma
            logger.info(f"GermaLemma version: {germalemma.__version__}")
        except AttributeError:
            logger.info("GermaLemma version: unknown (no __version__ attribute)")
    else:
        logger.info("GermaLemma: not installed")

    # Read dependency-parsing timeout and sentence-length limits from environment variables
    parse_timeout = float(os.getenv("SPACY_PARSE_TIMEOUT", str(DEFAULT_PARSE_TIMEOUT)))
    max_sentence_length = int(os.getenv("SPACY_MAX_SENTENCE_LENGTH", str(DEFAULT_MAX_SENTENCE_LENGTH)))

    logger.info(f"Dependency parsing limits: timeout={parse_timeout}s, max_length={max_sentence_length} tokens")

    start = time.time()
    total_processed_sents = 0
    dependency_warnings = 0

    while file_has_next:
        annos, file_has_next = fu.get_file_annos_chunk(stdin, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str, our_foundry="spacy")
        if len(annos) == 0: break
        total_processed_sents += len(annos)

        # Calculate progress statistics
        elapsed_time = time.time() - start
        sents_per_sec = total_processed_sents / elapsed_time if elapsed_time > 0 else 0
        current_time = time.strftime("%Y-%m-%d %H:%M:%S")

        logger.info(f"{current_time} | Processed: {total_processed_sents} sentences | Elapsed: {elapsed_time:.1f}s | Speed: {sents_per_sec:.1f} sents/sec")

        sents = [a.get_sentence() for a in annos]

        # Process sentences individually when dependency parsing is enabled, so each one gets timeout protection
        if args.use_dependencies == "True":
            for ix, sent in enumerate(sents):
                doc, dependency_success, warning = safe_dependency_parse(
                    spacy_de, sent, timeout=parse_timeout, max_length=max_sentence_length
                )
                if warning:
                    dependency_warnings += 1
                    logger.warning(f"Sentence {total_processed_sents - len(sents) + ix + 1}: {warning}")

                # Override use_dependencies based on actual parsing success
                actual_use_dependencies = "True" if dependency_success else "False"
                conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma, use_dependencies=actual_use_dependencies)
                print(conll_str + "\n")
        else:
            # Use batch processing for speed when dependencies are disabled.
            # n_process=1 avoids multiprocessing deadlocks and memory issues with large files.
            try:
                for ix, doc in enumerate(spacy_de.pipe(sents, batch_size=SPACY_BATCH, n_process=1)):
                    conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma, use_dependencies=args.use_dependencies)
                    print(conll_str + "\n")
            except Exception as e:
                logger.error(f"Batch processing failed: {str(e)}")
                logger.info("Falling back to individual sentence processing...")
                # Fallback: process sentences individually
                for ix, sent in enumerate(sents):
                    try:
                        doc = spacy_de(sent)
                        conll_str = get_conll_str(annos[ix], doc, use_germalemma=args.use_germalemma, use_dependencies=args.use_dependencies)
                        print(conll_str + "\n")
                    except Exception as sent_error:
                        logger.error(f"Failed to process sentence {total_processed_sents - len(sents) + ix + 1}: {str(sent_error)}")
                        logger.error(f"Sentence preview: {sent[:100]}...")
                        # Output a placeholder to maintain alignment
                        conll_str = get_conll_str(annos[ix], spacy_de("ERROR"), use_germalemma=args.use_germalemma, use_dependencies=args.use_dependencies)
                        print(conll_str + "\n")

    end = time.time()
    total_time = end - start
    final_sents_per_sec = total_processed_sents / total_time if total_time > 0 else 0

    logger.info("=== Processing Complete ===")
    logger.info(f"Total sentences: {total_processed_sents}")
    logger.info(f"Total time: {total_time:.2f}s")
    logger.info(f"Average speed: {final_sents_per_sec:.1f} sents/sec")

    if dependency_warnings > 0:
        logger.info(f"Dependency parsing warnings: {dependency_warnings} sentences processed without dependencies")