Added base/s=t during tokenization (2)

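The expected term positions in the XIP tests below shift because the
token stream now carries the base/s=t annotation. As an illustration
only (not part of this change), a lookup by term prefix would keep such
tests independent of hard-coded positions; the helper name
first_term_with_prefix and the usage line are hypothetical:

    use strict;
    use warnings;

    # Return the first term of one stream position that starts with
    # the given prefix, e.g. 'xip/l:' for the lemma layer.
    sub first_term_with_prefix {
      my ($terms, $prefix) = @_;   # $terms: arrayref of one token's terms
      my ($hit) = grep { index($_, $prefix) == 0 } @$terms;
      return $hit;
    }

    # Hypothetical usage against the structures tested below:
    # is(first_term_with_prefix($data->{stream}->[0], 'xip/l:'),
    #    'xip/l:zu', 'Lemma');
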
Change-Id: I005be1ef836aa1016219c63c45b4fd2bb0431ffa
diff --git a/t/index/xip_constituency.t b/t/index/xip_constituency.t
index 17e6c7e..afd1e2a 100644
--- a/t/index/xip_constituency.t
+++ b/t/index/xip_constituency.t
@@ -18,14 +18,12 @@
 
 # The length includes the punct - but that doesn't matter
 is($data->{stream}->[0]->[1], '<>:xip/c:PREP$<b>64<i>0<i>3<i>1<b>3', 'Prep phrase');
-is($data->{stream}->[0]->[4], '<>:xip/c:PP$<b>64<i>0<i>30<i>4<b>2', 'pp phrase');
-done_testing;
-__END__
-is($data->{stream}->[0]->[6], '<>:xip/c:TOP$<b>64<i>0<i>129<i>17<b>0', 'top phrase');
-is($data->{stream}->[0]->[7], '<>:xip/c:MC$<b>64<i>0<i>129<i>17<b>1', 'mc phrase');
-
+is($data->{stream}->[0]->[2], '<>:xip/c:PP$<b>64<i>0<i>30<i>4<b>2', 'pp phrase');
+is($data->{stream}->[0]->[4], '<>:xip/c:TOP$<b>64<i>0<i>129<i>17<b>0', 'top phrase');
+is($data->{stream}->[0]->[5], '<>:xip/c:MC$<b>64<i>0<i>129<i>17<b>1', 'mc phrase');
 is($data->{stream}->[-1]->[0], '<>:xip/c:VERB$<b>64<i>124<i>128<i>18<b>4', 'Verb phrase');
 
+
 done_testing;
 
 __END__
diff --git a/t/index/xip_morpho.t b/t/index/xip_morpho.t
index 01aed9e..db59ef3 100644
--- a/t/index/xip_morpho.t
+++ b/t/index/xip_morpho.t
@@ -17,8 +17,8 @@
 like($data->{foundries}, qr!xip/morpho!, 'data');
 like($data->{layerInfos}, qr!xip/l=tokens!, 'data');
 like($data->{layerInfos}, qr!xip/p=tokens!, 'data');
-is($data->{stream}->[0]->[4], 'xip/l:zu', 'Lemma');
-is($data->{stream}->[0]->[5], 'xip/p:PREP', 'POS');
+is($data->{stream}->[0]->[5], 'xip/l:zu', 'Lemma');
+is($data->{stream}->[0]->[6], 'xip/p:PREP', 'POS');
 
 is($data->{stream}->[1]->[3], 'xip/l:letzt', 'Lemma');
 is($data->{stream}->[1]->[4], 'xip/p:ADJ', 'POS');
diff --git a/t/index/xip_sentences.t b/t/index/xip_sentences.t
index b9f5a66..896422e 100644
--- a/t/index/xip_sentences.t
+++ b/t/index/xip_sentences.t
@@ -16,10 +16,10 @@
 
 like($data->{foundries}, qr!xip/sentences!, 'data');
 
-is($data->{stream}->[0]->[1], '-:xip/sentences$<i>1', 'Number of paragraphs');
 is($data->{stream}->[0]->[0], '-:tokens$<i>18', 'Number of tokens');
-is($data->{stream}->[0]->[2], '<>:xip/s:s$<b>64<i>0<i>129<i>17<b>0', 'Text');
-is($data->{stream}->[0]->[3], '_0$<i>0<i>3', 'Position');
+is($data->{stream}->[0]->[1], '-:xip/sentences$<i>1', 'Number of paragraphs');
+is($data->{stream}->[0]->[3], '<>:xip/s:s$<b>64<i>0<i>129<i>17<b>0', 'Text');
+is($data->{stream}->[0]->[4], '_0$<i>0<i>3', 'Position');
 is($data->{stream}->[-1]->[0], '_17$<i>124<i>128', 'Position');
 
 done_testing;