Improve handling of sentence splitting around quoted speech

Change-Id: Id699624f17e3e983b0900d48e95c1d7e782e2215
diff --git a/Changes b/Changes
index e3c52a1..396c19f 100644
--- a/Changes
+++ b/Changes
@@ -1,3 +1,6 @@
+0.1.3 2022-03-08
+    - Introduced refined handling of sentence boundaries in and around quoted speech.
+
 0.1.2 2021-12-07
     - Improve performance of rune to symbol conversion in transduction
       method.
diff --git a/matrix_test.go b/matrix_test.go
index 7919115..80523e5 100644
--- a/matrix_test.go
+++ b/matrix_test.go
@@ -286,10 +286,22 @@
 	assert.True(mat.Transduce(strings.NewReader("Ich wohne in der Weststr. und Du?"), w))
 	sentences = strings.Split(w.String(), "\n\n")
 	assert.Equal(len(sentences), 2)
-	/*
-		Test:
-		"\"Ausschalten!!!\", sagte er. \"Hast Du nicht gehört???\""), w))
-	*/
+
+	w.Reset()
+	assert.True(mat.Transduce(strings.NewReader("\"Alter!\", sagte er: \"Komm nicht wieder!\" Geh!!! \"Lass!\" Dann ging er."), w))
+	sentences = strings.Split(w.String(), "\n\n")
+	assert.Equal(len(sentences), 5)
+	assert.Equal("\"\nAlter\n!\n\"\n,\nsagte\ner\n:\n\"\nKomm\nnicht\nwieder\n!\n\"", sentences[0])
+	assert.Equal("Geh\n!!!", sentences[1])
+	assert.Equal("\"\nLass\n!\n\"", sentences[2])
+	assert.Equal("Dann\nging\ner\n.", sentences[3])
+
+	w.Reset()
+	assert.True(mat.Transduce(strings.NewReader("\"Ausschalten!!!\", sagte er. \"Hast Du nicht gehört???\""), w))
+	sentences = strings.Split(w.String(), "\n\n")
+	assert.Equal(len(sentences), 3)
+	assert.Equal("\"\nAusschalten\n!!!\n\"\n,\nsagte\ner\n.", sentences[0])
+	assert.Equal("\"\nHast\nDu\nnicht\ngehört\n???\n\"", sentences[1])
 }
 
 func TestMatrixFullTokenizerTokenSplitter(t *testing.T) {
diff --git a/src/tokenizer.xfst b/src/tokenizer.xfst
index 8026c01..3d63e5b 100644
--- a/src/tokenizer.xfst
+++ b/src/tokenizer.xfst
@@ -44,15 +44,11 @@
            [%, %,]];
 
 ! Right punctuation - excluding the characters that can be used as apostrophe
-define RP [SP|","|";"|":"|
+define RPS ["”"|"›"|"»"|%"|[%’ %’]|["'" "'"]|[%‘ %‘]];
+define RP [SP|RPS|","|";"|":"|
               ")"|"]"|"}"|
-              "”"|"›"|"»"|
-              %"|
               ! differs
-              ["'" "'"]|
-              "*"|"/"|"_"| ! Can be Markdown
-              ! from book
-              [%‘ %‘]|[%’ %’]];
+              "*"|"/"|"_"]; ! Can be Markdown
 
 define Sym ["-"|"+"|"<"|">"|"*"|"/"|%=|%@|%&];
 define Apos %'|%’|%`;
@@ -221,9 +217,16 @@
   File @-> ... NLout,
   Domain @-> ... NLout,
   Emoji @-> ... NLout
-] .o. [[WS|NL]+ @-> 0 || [ .#. | NLout ] _ ];
+];
 
 echo - Introduce Sentence splitter
-read regex Token .o. [[["."|"!"|"?"]+|"…"] @-> ... NLout \/ NLout _ ];
+! And compose Whitespace ignorance
+read regex Token .o. [
+  SP NLout %" @-> ... NLout \/ _ NLout \%,
+] .o. [
+  SP @-> ... NLout \/ NLout _ NLout \%"
+] .o. [
+  [WS|NL]+ @-> 0 || [ .#. | NLout ] _
+];
 
 ! foma -e "source tokenizer.xfst" -q -s && cat text.txt | flookup tokenizer.fst -x -b
\ No newline at end of file
diff --git a/testdata/tokenizer.datok b/testdata/tokenizer.datok
index 2acbe22..81023a6 100644
--- a/testdata/tokenizer.datok
+++ b/testdata/tokenizer.datok
Binary files differ
diff --git a/testdata/tokenizer.fst b/testdata/tokenizer.fst
index 09c368c..d1632b8 100644
--- a/testdata/tokenizer.fst
+++ b/testdata/tokenizer.fst
Binary files differ
diff --git a/testdata/tokenizer.matok b/testdata/tokenizer.matok
index 3af2920..f332244 100644
--- a/testdata/tokenizer.matok
+++ b/testdata/tokenizer.matok
Binary files differ