Stable testing across datasets
diff --git a/systems/parse_turku.py b/systems/parse_turku.py
new file mode 100644
index 0000000..7694504
--- /dev/null
+++ b/systems/parse_turku.py
@@ -0,0 +1,63 @@
+# TODO: write a client to make multiple requests to the server!
+import subprocess, json, time
+import requests, glob, logging
+import os.path, sys
+from lib.CoNLL_Annotation import get_token_type
+import my_utils.file_utils as fu
+import argparse
+
+
+TIGER_CORPUS = "/home/daza/datasets/TIGER_conll/tiger_release_aug07.corrected.16012013.conll09"
+DE_GSD_CORPUS = "/home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu"
+
+
+if __name__ == "__main__":
+    
+    """
+    EXECUTE:
+        
+        python systems/parse_turku.py --corpus_name DE_GSD --gld_token_type CoNLLUP_Token \
+            -i /home/daza/datasets/ud-treebanks-v2.2/UD_German-GSD/de_gsd-ud-test.conllu
+            
+    """
+    
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_file", help="Input Corpus", required=True)
+    parser.add_argument("-n", "--corpus_name", help="Corpus Name", default="Corpus")
+    parser.add_argument("-gtt", "--gld_token_type", help="CoNLL Format of the Gold Data", default="CoNLL09_Token")
+    parser.add_argument("-c", "--comment_str", help="CoNLL Format of comentaries inside the file", default="#")
+    args = parser.parse_args()
+    
+    file_has_next, chunk_ix = True, 0
+    CHUNK_SIZE = 10000
+    
+    # =====================================================================================
+    #                    LOGGING INFO ...
+    # =====================================================================================
+    logger = logging.getLogger(__name__)
+    console_hdlr = logging.StreamHandler(sys.stdout)
+    os.makedirs("logs", exist_ok=True)  # the FileHandler below fails if logs/ does not exist yet
+    file_hdlr = logging.FileHandler(filename=f"logs/Parse_{args.corpus_name}_Turku.log")
+    logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])
+    logger.info(f"Chunking TIGER Corpus in chunks of {CHUNK_SIZE} Sentences")
+    
+    # =====================================================================================
+    #                    PROCESS (PARSE) the Input Corpus ...
+    # =====================================================================================
+    start = time.time()
+    total_processed_sents = 0
+    line_generator = fu.file_generator(args.input_file)
+    while file_has_next:
+        raw_text, file_has_next, n_sents = fu.get_file_chunk(line_generator, chunk_size=CHUNK_SIZE, token_class=get_token_type(args.gld_token_type), comment_str=args.comment_str)
+        total_processed_sents += n_sents
+        if len(raw_text) > 0:
+            fu.turku_parse_file(raw_text, args.input_file, chunk_ix)
+            now = time.time()
+            elapsed = (now - start)
+            logger.info(f"Time Elapsed: {elapsed}. Processed {total_processed_sents}. [{total_processed_sents/elapsed} Sents/sec]\n") # Toks/Sec???
+        chunk_ix += 1
+        if chunk_ix == 10: break  # cap the run at 10 chunks (100K sentences) for testing
+    end = time.time()
+    logger.info(f"Processing File {args.corpus_name} took {(end - start)} seconds!")
+    
+    
\ No newline at end of file
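
The TODO at the top of parse_turku.py asks for a client that sends multiple requests to the parsing server. Below is a minimal, hypothetical sketch of what such a client could look like, assuming a Turku-style parser server is already running and accepts raw text via HTTP POST and returns CoNLL-U; the URL, port, and response handling are assumptions and not part of this patch.

    import requests

    PARSER_URL = "http://localhost:7689"  # assumed address/port of the parser server

    def parse_chunk(raw_text, timeout=300):
        """Send one chunk of raw text to the parser server and return its CoNLL-U output."""
        response = requests.post(
            PARSER_URL,
            data=raw_text.encode("utf-8"),
            headers={"Content-Type": "text/plain; charset=utf-8"},
            timeout=timeout,
        )
        response.raise_for_status()
        return response.text

    def parse_chunks(chunks, out_path):
        """Parse several chunks sequentially and write the results to a single file."""
        with open(out_path, "w", encoding="utf-8") as out:
            for i, chunk in enumerate(chunks):
                out.write(parse_chunk(chunk))
                print(f"Finished chunk {i}")

A helper along these lines could eventually take over the role of fu.turku_parse_file(...) inside the chunking loop above, with retries and batching added as needed.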