Lucene Backend is now a module (1)
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/KorapDocument.java b/trunk/src/main/java/de/ids_mannheim/korap/KorapDocument.java
new file mode 100644
index 0000000..79700ab
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/KorapDocument.java
@@ -0,0 +1,157 @@
+package de.ids_mannheim.korap;
+
+import java.util.*;
+
+import de.ids_mannheim.korap.util.KorapDate;
+import de.ids_mannheim.korap.document.KorapPrimaryData;
+import de.ids_mannheim.korap.index.FieldDocument;
+
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.annotation.*;
+
+/* TODO: Author and textClass may be arrays! */
+
+
+/**
+ * Abstract class representing a document in the KorAP index.
+ *
+ * @author ndiewald
+ */
+public abstract class KorapDocument {
+    private KorapPrimaryData primaryData;
+
+    @JsonIgnore
+    public int internalDocID, localDocID;
+
+    @JsonIgnore
+    public String foundry;
+
+    private String author, textClass, corpusID,
+	           pubPlace, ID, title, subTitle;
+    
+    private KorapDate pubDate;
+
+    /**
+     * Set the publication date of the document the match occurs in.
+     *
+     * @param date The date as a KorapDate compatible string representation.
+     * @return A KorapDate object for chaining.
+     * @see KorapDate#KorapDate(String)
+     */
+    public KorapDate setPubDate (String date) {
+	//	ObjectMapper mapper = new ObjectMapper();
+	this.pubDate = new KorapDate(date);
+	return this.pubDate;
+    };
+
+    /**
+     * Set the publication date of the document the match occurs in.
+     *
+     * @param date The date as a KorapDate object.
+     * @return A KorapDate object for chaining.
+     * @see KorapDate
+     */
+    public KorapDate setPubDate (KorapDate date) {
+	return (this.pubDate = date);
+    };
+
+    /**
+     * Get the publication date of the document the match occurs in as a KorapDate object.
+     */
+    @JsonIgnore
+    public KorapDate getPubDate () {
+	return this.pubDate;
+    };
+
+    @JsonProperty("pubDate")
+    public String getPubDateString () {
+	return this.pubDate.toDisplay();
+    };
+
+    public void setAuthor (String author) {
+	this.author = author;
+    };
+
+    public String getAuthor () {
+	return this.author;
+    };
+
+    public void setTextClass (String textClass) {
+	this.textClass = textClass;
+    };
+
+    public String getTextClass () {
+	return this.textClass;
+    };
+
+    public void setPubPlace (String pubPlace) {
+	this.pubPlace = pubPlace;
+    };
+
+    public String getPubPlace () {
+	return this.pubPlace;
+    };
+
+    public void setCorpusID (String corpusID) {
+	this.corpusID = corpusID;
+    };
+
+    @JsonProperty("corpusID")
+    public String getCorpusID () {
+	return this.corpusID;
+    };
+
+    public void setID (String ID) {
+	this.ID = ID;
+    };
+
+    @JsonProperty("ID")
+    public String getID () {
+	return this.ID;
+    };
+
+    public void setTitle (String title) {
+	this.title = title;
+    };
+
+    public String getTitle () {
+	return this.title;
+    };
+
+    public void setSubTitle (String subTitle) {
+	this.subTitle = subTitle;
+    };
+
+    public String getSubTitle () {
+	return this.subTitle;
+    };
+
+    @JsonIgnore
+    public void setPrimaryData (String primary) {
+	this.primaryData = new KorapPrimaryData(primary);
+    };
+
+    public void setPrimaryData (KorapPrimaryData primary) {
+	this.primaryData = primary;
+    };
+
+    public String getPrimaryData () {
+	if (this.primaryData == null)
+	    return "";
+	return this.primaryData.toString();
+    };
+
+    public String getPrimaryData (int startOffset) {
+	return this.primaryData.substring(startOffset);
+    };
+
+    public String getPrimaryData (int startOffset, int endOffset) {
+	return this.primaryData.substring(startOffset, endOffset);
+    };
+
+    @JsonIgnore
+    public int getPrimaryDataLength () {
+	return this.primaryData.length();
+    };
+};
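
A minimal usage sketch of the metadata API above, assuming a concrete subclass such as KorapMatch; the field values and the date format accepted by KorapDate are made up, and exception handling is omitted:

    // KorapDocument is abstract; KorapMatch is one concrete subclass
    KorapDocument doc = new KorapMatch();
    doc.setTitle("Example title");
    doc.setAuthor("Jane Doe");
    doc.setPubDate("2005-04-01");            // parsed into a KorapDate
    doc.setPrimaryData("Some primary text");
    String display = doc.getPubDateString(); // serialized as "pubDate" in JSON
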
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/KorapFilter.java b/trunk/src/main/java/de/ids_mannheim/korap/KorapFilter.java
new file mode 100644
index 0000000..68153a8
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/KorapFilter.java
@@ -0,0 +1,172 @@
+package de.ids_mannheim.korap;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+
+import de.ids_mannheim.korap.filter.BooleanFilter;
+import de.ids_mannheim.korap.filter.RegexFilter;
+import de.ids_mannheim.korap.util.KorapDate;
+import org.apache.lucene.index.Term;
+
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.lucene.search.NumericRangeQuery;
+
+
+/*
+  Todo: WildCardFilter!
+  Support: delete boolean etc.
+  Support: supports foundries
+*/
+
+/**
+ * @author Nils Diewald
+ *
+ * KorapFilter implements a simple API for creating meta queries
+ * constituting Virtual Collections.
+ */
+
+/*
+<request>
+  <query>
+    ...XYZ...
+  </query>
+  <filter>
+    <cond><foundry value="Treetagger" /></cond>
+    <cond><foundry value="MATE" /></cond>
+    <condOr>
+      <cond><textClass value="sports" /></cond>
+      <cond><textClass value="news" /></cond>
+    </condOr>
+    <cond><pubDate till="2009" /></cond>
+    <cond><author regex="Peter .+?" /></cond>
+  </filter>
+</request>
+
+Search for XYZ in all documents in the foundries "Treetagger" and "MATE" that have either the text type "sports" or the text type "news", were published no later than 2009, and whose author matches the regular expression "Peter .+?".
+
+*/
+
+public class KorapFilter {
+    private KorapFilter filter;
+    private Query query;
+
+    // Logger
+    private final static Logger jlog = LoggerFactory.getLogger(KorapFilter.class);
+
+    /**
+     * Search for documents of a specific genre.
+     * @param genre The name of the genre as a string
+     */
+    public BooleanFilter genre (String genre) {
+	return new BooleanFilter("textClass", new TermQuery(
+            new Term("textClass", genre)
+        ));
+    };
+
+    /**
+     * Search for documents of specific genres.
+     * @param genre The names of the genres as a regular expression.
+     */
+    public BooleanFilter genre (RegexFilter genre) {
+	return new BooleanFilter("textClass", genre.toQuery("textClass"));
+    };
+
+    /**
+     * Search for documents of specific genres.
+     * @param genre The name of the genre as a string
+     * @param genres The names of further genres as strings
+     *
+     * This method is EXPERIMENTAL and may change without warnings!
+     */
+    public BooleanFilter genre (String genre, String ... genres) {
+	BooleanFilter bf = new BooleanFilter("textClass", new TermQuery(
+            new Term("textClass", genre)
+        ));
+	bf = bf.or(genres);
+	return bf;
+    };
+
+    public RegexFilter re (String value) {
+	return new RegexFilter(value);
+    };
+
+    public Query since (String date) {
+	int since = new KorapDate(date).floor();
+	if (since == 0 || since == KorapDate.BEGINNING)
+	    return (Query) null;
+
+	return NumericRangeQuery.newIntRange("pubDate", since, KorapDate.END, true, true);
+    };
+
+
+    public Query till (String date) {
+	try {
+	    int till =  new KorapDate(date).ceil();
+	    if (till == 0 || till == KorapDate.END)
+		return (Query) null;
+
+	    return NumericRangeQuery.newIntRange("pubDate", KorapDate.BEGINNING, till, true, true);
+	}
+	catch (NumberFormatException e) {
+	    jlog.warn("Parameter of till(date) is invalid");
+	};
+	return (Query) null;
+    };
+
+
+    public Query between (String beginStr, String endStr) {
+	KorapDate beginDF = new KorapDate(beginStr);
+
+	int begin = beginDF.floor();
+
+	int end = new KorapDate(endStr).ceil();
+
+	if (end == 0)
+	    return (Query) null;
+
+	if (begin == KorapDate.BEGINNING && end == KorapDate.END)
+	    return (Query) null;
+
+	if (begin == end) {
+	    return new TermQuery(new Term("pubDate", beginDF.toString()));
+	};
+
+	return NumericRangeQuery.newIntRange("pubDate", begin, end, true, true);
+    };
+
+
+    public Query date (String date) {
+	KorapDate dateDF = new KorapDate(date);
+
+	if (dateDF.year() == 0)
+	    return (Query) null;
+
+	if (dateDF.day() == 0 || dateDF.month() == 0) {
+	    int begin = dateDF.floor();
+	    int end = dateDF.ceil();
+
+	    if (end == 0 || (begin == KorapDate.BEGINNING && end == KorapDate.END))
+		return (Query) null;
+	    
+	    return NumericRangeQuery.newIntRange("pubDate", begin, end, true, true);
+	};
+	
+	return new TermQuery(new Term("pubDate", dateDF.toString()));
+    };
+
+
+    /*
+textClass
+id
+title
+subtitle
+author
+corpus
+pubDate
+pubPlace
+    */
+
+
+};
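
A sketch of how these factory methods might be combined into a virtual collection filter; the genre names and date strings are assumptions, and applying the filters to a search is not shown in this class:

    KorapFilter kf = new KorapFilter();

    // Either "sports" or "news" as text class
    BooleanFilter genres = kf.genre("sports", "news");

    // Text classes matching a regular expression
    BooleanFilter sci = kf.genre(kf.re("wissenschaft.*"));

    // Date constraints on the numeric "pubDate" field
    Query until2009 = kf.till("2009");
    Query in2005    = kf.between("2005-01-01", "2005-12-31");
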
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/KorapIndex.java b/trunk/src/main/java/de/ids_mannheim/korap/KorapIndex.java
new file mode 100644
index 0000000..42349f6
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/KorapIndex.java
@@ -0,0 +1,671 @@
+package de.ids_mannheim.korap;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+import java.net.URL;
+
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.zip.GZIPInputStream;
+import java.io.FileInputStream;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.SpanOrQuery;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+
+import com.fasterxml.jackson.annotation.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import org.apache.lucene.util.Version;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.OpenBitSet;
+import org.apache.lucene.search.DocIdSet;
+
+import de.ids_mannheim.korap.index.FieldDocument;
+import de.ids_mannheim.korap.KorapResult;
+import de.ids_mannheim.korap.KorapMatch;
+import de.ids_mannheim.korap.index.PositionsToOffset;
+import de.ids_mannheim.korap.document.KorapPrimaryData;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+
+/*
+
+  Todo: Use FieldCache!
+
+
+  http://invertedindex.blogspot.co.il/2009/04/lucene-dociduid-mapping-and-payload.html
+  see korap/search.java -> retrieveTokens
+
+  Support multiple indices.
+
+  Support frequency search with regular expressions, so multiple bookkeeping:
+  c<:VVFIN:ging:gehen:past::
+  c>:VVFIN:gnig:neheg:past::
+  -> search for frequencies of VVFIN/gehen
+  -> c:VVFIN:[^:]*?:gehen:past:...
+*/
+
+/**
+ * @author Nils Diewald
+ * 
+ * KorapIndex implements a simple API for searching in and writing to a
+ * Lucene index and for requesting information about the index's nature.
+ */
+public class KorapIndex {
+    private Directory directory;
+
+    // Temp:
+    public IndexReader reader;
+
+    private IndexWriter writer;
+    private IndexSearcher searcher;
+    private boolean readerOpen = false;
+    private int commitCounter = 0;
+    private int autoCommit = 500;
+    private HashMap termContexts;
+    private ObjectMapper mapper = new ObjectMapper();
+
+
+    private static ByteBuffer bb = ByteBuffer.allocate(4);
+    private static ByteBuffer bbOffset = ByteBuffer.allocate(8);
+
+
+    private byte[] pl = new byte[4];
+
+    private Set<String> fieldsToLoad;
+
+    // Logger
+    private final static Logger log = LoggerFactory.getLogger(KorapIndex.class);
+
+    public KorapIndex () throws IOException {
+	this((Directory) new RAMDirectory());
+    };
+
+
+    public KorapIndex (String index) throws IOException {
+	this(FSDirectory.open(new File( index )));
+    };
+
+
+    public KorapIndex (Directory directory) throws IOException {
+	this.directory = directory;
+
+	fieldsToLoad = new HashSet<String>();
+	fieldsToLoad.add("author");
+	fieldsToLoad.add("ID");
+	fieldsToLoad.add("title");
+	fieldsToLoad.add("subTitle");
+	fieldsToLoad.add("textClass");
+	fieldsToLoad.add("pubPlace");
+	fieldsToLoad.add("pubDate");
+	fieldsToLoad.add("corpusID");
+
+	// Base analyzer for searching and indexing
+	StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
+
+	// Create configuration with base analyzer
+	IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer);
+
+	this.writer = new IndexWriter(this.directory, config);
+    };
+
+
+    public void close () throws IOException {
+	this.closeReader();
+	this.closeWriter();
+    };
+
+
+    public IndexReader reader () {
+	if (!readerOpen)
+	    this.openReader();
+
+	return this.reader;
+    };
+
+    public IndexSearcher searcher () {
+	if (this.searcher == null) {
+	    this.searcher = new IndexSearcher(this.reader());
+	};
+	return this.searcher;
+    };
+
+    public void closeWriter () throws IOException {
+	this.writer.close();
+    };
+
+
+    public void closeReader () throws IOException {
+	if (readerOpen) {
+	    this.reader.close();
+	    readerOpen = false;
+	};
+    };
+
+
+    public void openReader () {
+	try {
+	    this.reader = DirectoryReader.open(this.directory);
+	    readerOpen = true;
+	    if (this.searcher != null) {
+		this.searcher = new IndexSearcher(reader);
+	    };
+	}
+
+	catch (IOException e) {
+	    log.warn( e.getLocalizedMessage() );
+	};
+    };
+
+
+    public FieldDocument addDoc (FieldDocument fd) throws IOException {
+	
+	// Add document to writer
+	this.writer.addDocument( fd.doc );
+	if (++commitCounter > autoCommit) {
+	    this.commit();
+	    commitCounter = 0;
+	};
+	return fd;
+    };
+
+    // Add with file!
+    public FieldDocument addDoc (String json) throws IOException {
+	FieldDocument fd = this.mapper.readValue(json, FieldDocument.class);
+	return this.addDoc(fd);
+    };
+
+    public FieldDocument addDoc (File json) throws IOException {
+	FieldDocument fd = this.mapper.readValue(json, FieldDocument.class);
+	return this.addDoc(fd);
+    };
+
+    public FieldDocument addDocFile(String json) throws IOException {
+	return this.addDocFile(json, false);
+    };
+
+    public FieldDocument addDocFile(String json, boolean gzip) throws IOException {
+	if (gzip) {
+	    FieldDocument fd = this.mapper.readValue(new GZIPInputStream(new FileInputStream(json)), FieldDocument.class);
+	    return this.addDoc(fd);
+	};
+	return this.addDoc(json);
+    };
+
+    public void commit () throws IOException {
+	if (commitCounter > 0) {
+	    this.writer.commit();
+	    commitCounter = 0;
+	    this.closeReader();
+	};
+    };
+
+
+    // Get autoCommit value
+    public int autoCommit () {
+	return this.autoCommit;
+    };
+
+
+    // Set autoCommit value
+    public void autoCommit (int number) {
+	this.autoCommit = number;
+    };
+
+
+    // Search for meta information in term vectors
+    private long numberOfAtomic (Bits docvec,
+				 AtomicReaderContext atomic,
+				 Term term) throws IOException {
+
+	// This reimplements docsAndPositionsEnum with payloads
+	final Terms terms = atomic.reader().fields().terms(term.field());
+
+	// Only continue if terms were found
+	if (terms != null) {
+	    // Todo: Maybe reuse a termsEnum!
+	    final TermsEnum termsEnum = terms.iterator(null);
+
+	    // Set the position in the iterator to the term that is sought
+	    if (termsEnum.seekExact(term.bytes(), true)) {
+
+		// Start an iterator to fetch all payloads of the term
+		DocsAndPositionsEnum docs = termsEnum.docsAndPositions(
+		    docvec,
+		    null,
+		    DocsAndPositionsEnum.FLAG_PAYLOADS
+		);
+
+		// Iterator is empty
+		if (docs.docID() == DocsAndPositionsEnum.NO_MORE_DOCS) {
+		    return 0;
+		};
+
+		// Init some variables for data copying
+		long occurrences = 0;
+		BytesRef payload;
+
+		// Init nextDoc()
+		while (docs.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+
+		    // Go to first term (initialization phase)
+// TODO: THIS MAY BE WRONG!
+		    docs.nextPosition();
+
+		    // Copy payload with the offset of the BytesRef
+		    payload = docs.getPayload();
+		    System.arraycopy(payload.bytes, payload.offset, pl, 0, 4);
+
+		    // Add payload as integer
+		    occurrences += bb.wrap(pl).getInt();
+		};
+
+		// Return the sum of all occurrences
+		return occurrences;
+	    };
+	};
+
+	// Nothing found
+	return 0;
+    };
+
+
+    /**
+     * Search for the number of occurrences of different types,
+     * e.g. "documents", "sentences" etc.
+     *
+     * @param foundry The foundry to search in.
+     * @param type The type of meta information, e.g. "documents" or "sentences".
+     */
+    public long numberOf (String foundry, String type) {
+	// Short cut for documents
+	if (type.equals("documents")) {
+	    return this.reader().numDocs();
+	};
+    
+	// Create search term
+	Term term = new Term(foundry, "-:" + type);
+	// System.err.println(">> Search for -:" + type + " in " + foundry);
+
+
+	long occurrences = 0;
+	try {
+	    // Iterate over all atomic readers and collect occurrences
+	    for (AtomicReaderContext atomic : this.reader().leaves()) {
+		occurrences += this.numberOfAtomic(
+                    atomic.reader().getLiveDocs(),
+		    atomic,
+		    term
+		);
+	    };
+	}
+
+	// Something went wrong
+	catch (IOException e) {
+	    log.warn( e.getLocalizedMessage() );
+	};
+
+	return occurrences;
+    };
+
+    /**
+     * Search for the number of occurrences of different types,
+     * e.g. "documents", "sentences" etc., in the base foundry.
+     *
+     * @param type The type of meta information, e.g. "documents" or "sentences".
+     *
+     * @see #numberOf(String, String)
+     */
+    public long numberOf (String type) throws IOException {
+	return this.numberOf("base", type);
+    };
+
+
+    /**
+     * Search for the number of occurrences of different types,
+     * e.g. "documents", "sentences" etc., in a specific set of documents.
+     *
+     * @param docvec The document vector for filtering the search space.
+     * @param foundry The foundry to search in.
+     * @param type The type of meta information, e.g. "documents" or "sentences".
+     *
+     * @see #numberOf(String, String)
+     */
+    public long numberOf (Bits docvec, String foundry, String type) throws IOException {
+
+	// Shortcut for documents
+	if (type.equals("documents")) {
+	    OpenBitSet os = (OpenBitSet) docvec;
+	    return os.cardinality();
+	};
+    
+	Term term = new Term(foundry, "-:" + type);
+
+	int occurrences = 0;
+	try {
+	    for (AtomicReaderContext atomic : this.reader().leaves()) {
+		occurrences += this.numberOfAtomic(docvec, atomic, term);
+	    };
+	}
+	catch (IOException e) {
+	    log.warn( e.getLocalizedMessage() );
+	};
+
+	return occurrences;
+    };
+
+
+    // Deprecated
+    public long countDocuments () throws IOException {
+	log.warn("countDocuments() is DEPRECATED in favor of numberOf(\"documents\")!");
+	return this.numberOf("documents");
+    };
+
+
+    // Deprecated
+    public long countAllTokens () throws IOException {
+	log.warn("countAllTokens() is DEPRECATED in favor of numberOf(\"tokens\")!");
+	return this.numberOf("tokens");
+    };
+
+
+    /**
+     * search
+     */
+    public KorapResult search (SpanQuery query) {
+	return this.search((Bits) null, query, 0, (short) 10, true, (short) 6, true, (short) 6);
+    };
+
+    public KorapResult search (SpanQuery query,
+			       short count) {
+	return this.search((Bits) null, query, 0, count, true, (short) 6, true, (short) 6);
+    };
+
+    public KorapResult search (Bits bitset,
+			       SpanQuery query,
+			       short count) {
+	return this.search((Bits) bitset, query, 0, count, true, (short) 6, true, (short) 6);
+    };
+
+    public KorapResult search (SpanQuery query,
+			       int startIndex,
+			       short count,
+			       boolean leftTokenContext,
+			       short leftContext,
+			       boolean rightTokenContext,
+			       short rightContext) {
+	return this.search((Bits) null, query, startIndex, count,
+			   leftTokenContext, leftContext, rightTokenContext, rightContext);
+    };
+
+    public KorapResult search (Bits bitset,
+			       SpanQuery query,
+			       int startIndex,
+			       short count,
+			       boolean leftTokenContext,
+			       short leftContext,
+			       boolean rightTokenContext,
+			       short rightContext) {
+
+	this.termContexts = new HashMap<Term, TermContext>();
+	String foundry = query.getField();
+
+	KorapResult kr = new KorapResult(
+	    query.toString(),
+	    startIndex,
+	    count,
+	    leftTokenContext,
+	    leftContext,
+	    rightTokenContext,
+	    rightContext
+        );
+
+	HashSet<String> fieldsToLoadLocal = new HashSet<>(fieldsToLoad);
+	fieldsToLoadLocal.add(foundry);
+
+	try {
+	    for (AtomicReaderContext atomic : this.reader().leaves()) {
+		if (bitset == null)
+		    bitset = atomic.reader().getLiveDocs();
+
+		PositionsToOffset pto = new PositionsToOffset(atomic, foundry);
+
+		// Spans spans = NearSpansOrdered();
+		Spans spans = query.getSpans(atomic, bitset, termContexts);
+
+		IndexReader lreader = atomic.reader();
+
+		// TODO: Get document information from Cache!
+
+		// See: http://www.ibm.com/developerworks/java/library/j-benchmark1/index.html
+		long t1 = System.nanoTime();
+
+		int i = 0;
+		for (; i < kr.itemsPerPage(); i++) {
+
+		    if (spans.next() != true) {
+			break;
+		    };
+		   
+		    if (startIndex > i)
+			continue;
+
+		    int localDocID = spans.doc();
+		    int docID = atomic.docBase + localDocID;
+
+		    Document doc = lreader.document(docID, fieldsToLoadLocal);
+		    KorapMatch match = new KorapMatch();
+
+		    match.startPos = spans.start();
+		    match.endPos = spans.end();
+		    match.localDocID = localDocID;
+
+		    pto.add(localDocID, match.startPos);
+		    pto.add(localDocID, match.endPos - 1);
+
+		    match.leftContext = leftContext;
+		    match.rightContext = rightContext;
+
+		    match.leftTokenContext = leftTokenContext;
+		    match.rightTokenContext = rightTokenContext;
+
+		    // Add pos for context
+		    if (leftTokenContext) {
+			pto.add(localDocID, match.startPos - leftContext);
+		    };
+
+		    // Add pos for context
+		    if (rightTokenContext) {
+			pto.add(localDocID, match.endPos + rightContext - 1);
+		    };
+
+		    if (spans.isPayloadAvailable()) {
+
+			// TODO: Here are offsets and highlight offsets!
+			// <> payloads have 12 bytes (iii) or 8!?
+			// highlightoffsets have 10 bytes (iis)!
+
+			// 11 bytes!!!
+
+			/*
+			int[] offsets = getOffsetsFromPayload(spans.getPayload());
+			match.startOffset(offsets[0]);
+			match.startOffset(offsets[1]);
+			*/
+
+			try {
+			    ByteBuffer bb = ByteBuffer.allocate(10);
+			    for (byte[] b : spans.getPayload()) {
+
+				log.trace("Found a payload!!! with length {}", b.length);
+
+				// Todo element searches!
+
+				// Highlights!
+				if (b.length == 9) {
+				    bb.put(b);
+				    bb.rewind();
+
+				    int start = bb.getInt();
+				    int end = bb.getInt() -1;
+				    byte number = bb.get();
+
+				    log.trace("Have a payload: {}-{}", start, end);
+
+				    // Add this for offset search
+				    pto.add(localDocID, start);
+				    pto.add(localDocID, end);
+
+				    match.addHighlight(start, end, number);
+				}
+
+				// Element payload for match!
+				// This MAY BE the correct match
+				else if (b.length == 8) {
+				    bb.put(b);
+				    bb.rewind();
+
+				    if (match.potentialStartPosChar == -1) {
+					match.potentialStartPosChar = bb.getInt(0);
+				    }
+				    else {
+					if (bb.getInt(0) < match.potentialStartPosChar)
+					match.potentialStartPosChar = bb.getInt(0);
+				    };
+
+				    if (bb.getInt(4) > match.potentialEndPosChar)
+					match.potentialEndPosChar = bb.getInt(4);
+
+				    log.trace("Element payload from {} to {}",
+					      match.potentialStartPosChar,
+					      match.potentialEndPosChar);
+				}
+
+				else if (b.length == 4) {
+				    bb.put(b);
+				    bb.rewind();
+				    log.debug("Unknown[4]: {}", bb.getInt());
+				};
+
+				bb.clear();
+			    };
+
+			}
+
+			catch (Exception e) {
+			}
+
+			// match.payload(spans.getPayload());
+		    };
+
+
+		    match.internalDocID = docID;
+		    match.foundry = foundry;
+
+		    match.setAuthor(doc.get("author"));
+		    match.setTextClass(doc.get("textClass"));
+		    match.setID(doc.get("ID"));
+		    match.setTitle(doc.get("title"));
+		    match.setSubTitle(doc.get("subTitle"));
+		    match.setPubPlace(doc.get("pubPlace"));
+		    match.setCorpusID(doc.get("corpusID"));
+		    match.setPubDate(doc.get("pubDate"));
+
+		    match.setPrimaryData(
+		      new KorapPrimaryData(doc.get(foundry))
+		    );
+
+		    kr.add(match);
+		};
+
+		long t2 = System.nanoTime();
+
+		kr.setBenchmarkSearchResults(t1, t2);
+
+		while (spans.next() == true) {
+		    i++;
+		};
+
+		kr.setBenchmarkHitCounter(t2, System.nanoTime());
+
+		kr.setTotalResults(i);
+
+		for (KorapMatch km : kr.getMatches()) {
+		    km.processHighlight(pto);
+		};
+	    };
+
+	    // if (spans.isPayloadAvailable()) {
+	    // for (byte[] payload : spans.getPayload()) {
+	    // // retrieve payload for current matching span
+	    // payloadString.append(new String(payload));
+	    // payloadString.append(" | ");
+	    // };
+	    // };
+	}
+	catch (IOException e) {
+	    kr.setError("There was an IO error");
+	    log.warn( e.getLocalizedMessage() );
+	};
+
+	return kr;
+    };
+
+
+	/*
+
+    public void getFoundryStatistics {
+- provides statistical information:
+  - which documents have which foundries
+    - Collect all Bitvectors of each foundry and make the intersections afterwards
+  - ???
+    };
+
+
+    public KorapResult search (Bits bits, KorapQuery query) {
+	//	this.search(bits, query);
+    };
+
+    // countAllTokens
+    public int getNumberOfTokens (String corpus) throws IOException {
+	return this.getNumberOf("token", "base", corpus);
+    };
+
+
+    // retrieveTokens(docname, startOffset, endOffset, layer);
+
+
+    /*
+
+
+    // todo mit pagesize und offset
+
+
+    };
+*/
+
+};
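
A rough sketch of the indexing and search workflow this class provides; the file path, field name, and token surface are assumptions, and exception handling is omitted:

    KorapIndex ki = new KorapIndex();                  // in-memory RAMDirectory
    ki.addDocFile("/tmp/doc-00001.json.gz", true);     // gzipped FieldDocument JSON
    ki.commit();

    long docs      = ki.numberOf("documents");         // shortcut via numDocs()
    long sentences = ki.numberOf("base", "sentences"); // payload-counted "-:sentences"

    SpanQuery sq = new SpanTermQuery(new Term("tokens", "s:Baum"));
    KorapResult kr = ki.search(sq, (short) 10);
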
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/KorapMatch.java b/trunk/src/main/java/de/ids_mannheim/korap/KorapMatch.java
new file mode 100644
index 0000000..ed2db88
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/KorapMatch.java
@@ -0,0 +1,523 @@
+package de.ids_mannheim.korap;
+import java.util.*;
+
+import com.fasterxml.jackson.annotation.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import de.ids_mannheim.korap.index.PositionsToOffset;
+import static de.ids_mannheim.korap.util.KorapHTML.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/*
+  Todo: The implemented classes and private names are horrible!
+  Refactor, future-me!
+*/
+
+/**
+ * Representation of Matches in a KorapResult.
+ *
+ * @see KorapResult
+ * @author ndiewald
+ */
+public class KorapMatch extends KorapDocument {
+    ObjectMapper mapper = new ObjectMapper();
+
+    // Snippet information
+    @JsonIgnore
+    public short leftContext, rightContext;
+
+    @JsonIgnore
+    public int startPos, endPos;
+
+    @JsonIgnore
+    public int potentialStartPosChar = -1, potentialEndPosChar = -1;
+
+    @JsonIgnore
+    public boolean leftTokenContext, rightTokenContext;
+
+    private String tempSnippet, snippetHTML, snippetBrackets;
+    private HighlightCombinator snippetStack;
+    private boolean startMore = true, endMore = true;
+
+    private Collection<byte[]> payload;
+    private ArrayList<int[]> highlight;
+
+    // Logger
+    private final static Logger log = LoggerFactory.getLogger(KorapMatch.class);
+
+    /**
+     * Insert a highlight for the snippet view by means of positional
+     * offsets and an optional class number.
+     *
+     * @param start  Integer value of a span's positional start offset.
+     * @param end    Integer value of a span's positional end offset.
+     * @param number Optional class number of the highlight.
+     */
+    public void addHighlight (int start, int end, byte number) {
+	this.addHighlight(start, end, (int) number);
+    };
+
+    public void addHighlight (int start, int end, short number) {
+	this.addHighlight(start, end, (int) number);
+    };
+
+    public void addHighlight (int start, int end) {
+	this.addHighlight(start, end, (int) 1);
+    };
+
+    public void addHighlight (int start, int end, int number) {
+	if (this.highlight == null)
+	    this.highlight = new ArrayList<int[]>();
+
+	log.trace("Add highlight of class {} from {} to {}", number, start, end);
+
+	this.highlight.add(new int[]{ start, end, number});
+    };
+
+    /*
+    public JSONObject toJSON() {
+	JSONObject json = new JSONObject();
+	json.put("internalDocID", this.internalDocID);
+	
+	if (this.author != null)
+	    json.put("author", this.author);
+
+	if (this.getPubDate() != null && this.getPubDate().year() > 0)
+	    json.put("pubDate", this.getPubDate().toDisplay());
+
+	if (this.snippetHTML() != null)
+	    json.put("snippet", this.snippetHTML);
+
+	// json.put("primary", this.primary);
+
+	JSONArray pos = new JSONArray();
+	pos.add(this.startPos);
+	pos.add(this.endPos);
+	json.put("position", pos);
+
+	return json;
+    };
+    */
+
+
+    /**
+     * Generates a highlighted snippet for the match, which can
+     * afterwards be retrieved via snippetHTML() and snippetBrackets().
+     * <p>
+     * The information on offset positions has to be retrieved beforehand
+     * by filling up the PositionsToOffset.
+     *
+     * @param pto The PositionsToOffset object, containing relevant
+     *            positional information for highlighting
+     * @see #snippetHTML()
+     * @see #snippetBrackets()
+     * @see PositionsToOffset
+     */
+    public void processHighlight (PositionsToOffset pto) {
+
+	log.trace("Start highlight processing ...");
+	
+	// Get the list of spans for matches and highlighting
+	LinkedList<int[]> spans = this._processHighlightSpans(
+            pto,
+	    leftTokenContext,
+	    rightTokenContext
+        );
+
+	for (int[] s : spans) {
+	    log.trace(" >> [Spans] Start: {}, End: {}, Class: {}, Dummy: {}", s[0], s[1], s[2], s[3]);
+	};
+
+	ArrayList<int[]> stack = this._processHighlightStack(spans);
+
+	for (int[] s : stack) {
+	    log.trace(" >> [Stack] Start: {}, End: {}, Class: {}, Dummy: {}", s[0], s[1], s[2], s[3]);
+	};
+
+
+	if (this.tempSnippet == null)
+	    return;
+
+	this._processHighlightSnippet(this.tempSnippet, stack);
+
+	/*
+
+	Collection.sort(openList);
+with http://docs.oracle.com/javase/6/docs/api/java/util/Comparator.html
+	*/
+    };
+
+    private class OpeningTagComparator implements Comparator<int[]> {
+	@Override
+	public int compare (int[] arg0, int[] arg1) {
+	    if (arg0[0] > arg1[0]) {
+		return 1;
+	    }
+	    else if (arg0[0] == arg1[0]) {
+		if (arg0[1] > arg1[1])
+		    return -1;
+		return 1;
+	    };
+	    return -1;
+	};
+    };
+
+    private class ClosingTagComparator implements Comparator<int[]> {
+	@Override
+	public int compare (int[] arg0, int[] arg1) {
+	    if (arg0[1] > arg1[1]) {
+		return 1;
+	    }
+	    else if (arg0[1] == arg1[1]) {
+		if (arg0[0] < arg1[0])
+		    return 1;
+		return -1;
+	    };
+	    return -1;
+	};
+    };
+
+    private class HighlightCombinatorElement {
+	private short type;
+	private int number;
+	private String characters;
+
+	public HighlightCombinatorElement (short type, int number) {
+	    this.type = type;
+	    this.number = number;
+	};
+
+	public HighlightCombinatorElement (String characters) {
+	    this.type = 0;
+	    this.characters = characters;
+	};
+
+	public String toHTML () {	    
+	    if (this.type == 1) {
+		StringBuilder sb = new StringBuilder();
+		sb.append("<span class=\"");
+		if (this.number == -1) {
+		    sb.append("korap-match\"");
+		}
+		else {
+		    sb.append("korap-highlight korap-class-")
+			.append(this.number)
+			.append('"');
+		};
+		sb.append('>');
+		return sb.toString();
+	    }
+	    else if (this.type == 2) {
+		return "</span>";
+	    };
+	    return encodeHTML(this.characters);
+	};
+
+	public String toBrackets () {
+	    if (this.type == 1) {
+		StringBuilder sb = new StringBuilder();
+		if (this.number == -1) {
+		    sb.append("[");
+		}
+		else {
+		    sb.append("{");
+		    if (this.number != 0)
+			sb.append(this.number).append(':');
+		};
+		return sb.toString();
+	    }
+	    else if (this.type == 2) {
+		if (this.number == -1) {
+		    return "]";
+		}
+		return "}";
+	    };
+	    return this.characters;
+	};
+
+    };
+
+    private class HighlightCombinator {
+	private LinkedList<HighlightCombinatorElement> combine;
+	private LinkedList<Integer> balanceStack = new LinkedList<>();
+	private ArrayList<Integer> tempStack = new ArrayList<>(32);
+
+	public HighlightCombinator () {
+	    this.combine = new LinkedList<>();
+	};
+
+	public LinkedList<HighlightCombinatorElement> stack () {
+	    return this.combine;
+	};
+
+	public void addString (String characters) {
+	    this.combine.add(new HighlightCombinatorElement(characters));
+	};
+
+	public void addOpen (int number) {
+	    this.combine.add(new HighlightCombinatorElement((short) 1, number));
+	    this.balanceStack.add(number);
+	};
+
+	public void addClose (int number) {
+	    HighlightCombinatorElement lastComb;
+	    this.tempStack.clear();
+	    int eold = this.balanceStack.removeLast();
+	    while (eold != number) {
+		lastComb = this.combine.peekLast();
+		if (lastComb.type == 1 && lastComb.number != eold) {
+		    this.combine.removeLast();
+		}
+		else {
+		    this.combine.add(new HighlightCombinatorElement((short) 2, eold));
+		};
+		tempStack.add(eold);
+		eold = this.balanceStack.removeLast();
+	    };
+	    
+	    lastComb = this.combine.peekLast();
+	    if (lastComb.type == 1 && lastComb.number == number) {
+		this.combine.removeLast();
+	    }
+	    else {
+		this.combine.add(new HighlightCombinatorElement((short) 2, number));
+	    };
+	    
+	    for (int e : tempStack) {
+		combine.add(new HighlightCombinatorElement((short) 1, e));
+		balanceStack.add(e);
+	    };
+	};
+
+	public String toString () {
+	    StringBuilder sb = new StringBuilder();
+	    for (HighlightCombinatorElement e : combine) {
+		sb.append(e.toString()).append("\n");
+	    };
+	    return sb.toString();
+	};
+    };
+
+    private void _processHighlightSnippet (String clean, ArrayList<int[]> stack) {
+	int pos = 0;
+	int oldPos = 0;
+
+	log.trace("Create Snippet");
+
+	this.snippetStack = new HighlightCombinator();
+
+	for (int[] element : stack) {
+	    pos = element[3] != 0 ? element[0] : element[1];
+
+	    if (pos != oldPos) {
+		snippetStack.addString(clean.substring(oldPos, pos));
+
+		oldPos = pos;
+	    };
+
+	    if (element[3] != 0) {
+		snippetStack.addOpen(element[2]);
+	    }
+	    else {
+		snippetStack.addClose(element[2]);
+	    };
+	};
+
+	snippetStack.addString(clean.substring(pos));
+    };
+
+    @Deprecated
+    public String snippetHTML () {
+	return this.getSnippetHTML();
+    };
+
+    @JsonProperty("snippet")
+    public String getSnippetHTML () {
+	if (this.snippetHTML != null)
+	    return this.snippetHTML;
+
+	StringBuilder sb = new StringBuilder();
+	if (startMore)
+	    sb.append("<span class=\"korap-more-left\"></span>");
+
+	for (HighlightCombinatorElement hce : this.snippetStack.stack()) {
+	    sb.append(hce.toHTML());
+	};
+
+	if (endMore)
+	    sb.append("<span class=\"korap-more-right\"></span>");
+
+	return (this.snippetHTML = sb.toString());
+    };
+
+    @Deprecated
+    public String snippetBrackets () {
+	return this.getSnippetBrackets();
+    };
+    
+    @JsonIgnore
+    public String getSnippetBrackets () {
+	if (this.snippetBrackets != null)
+	    return this.snippetBrackets;
+
+	StringBuilder sb = new StringBuilder();
+
+	if (startMore)
+	    sb.append("... ");
+
+	for (HighlightCombinatorElement hce : this.snippetStack.stack()) {
+	    sb.append(hce.toBrackets());
+	};
+
+	if (endMore)
+	    sb.append(" ...");
+
+	return (this.snippetBrackets = sb.toString());
+    };
+
+
+    // Todo: Not very fast - just a direct translation of the perl script
+    private ArrayList<int[]> _processHighlightStack (LinkedList<int[]> spans) {
+
+	log.trace("Create Stack");
+
+
+	LinkedList<int[]> openList  = new LinkedList<int[]>();
+	LinkedList<int[]> closeList = new LinkedList<int[]>();
+
+	openList.addAll(spans);
+	closeList.addAll(spans);
+
+	Collections.sort(openList, new OpeningTagComparator());
+	Collections.sort(closeList, new ClosingTagComparator());
+
+	ArrayList<int[]> stack = new ArrayList<>(openList.size() * 2);
+
+	while (!openList.isEmpty() || !closeList.isEmpty()) {
+
+	    if (openList.isEmpty()) {
+		stack.addAll(closeList);
+		break;
+	    };
+
+	    if (openList.peekFirst()[0] < closeList.peekFirst()[1]) {
+		int[] e = openList.removeFirst().clone();
+		e[3] = 1;
+		stack.add(e);
+	    }
+	    else {
+		stack.add(closeList.removeFirst());
+	    };
+	};
+	return stack;
+    };
+
+
+    private LinkedList<int[]> _processHighlightSpans (PositionsToOffset pto,
+						      boolean leftTokenContext,
+						      boolean rightTokenContext) {
+	int startOffsetChar,
+	    endOffsetChar,
+	    startPosChar,
+	    endPosChar;
+
+	log.trace("Create Spans");
+
+	int ldid = this.localDocID;
+
+	// Match position
+	startPosChar = pto.start(ldid, this.startPos);
+
+	// Check potential differing start characters
+	// e.g. from element spans
+	if (potentialStartPosChar != -1 && startPosChar > potentialStartPosChar)
+	    startPosChar = potentialStartPosChar;
+
+	endPosChar = pto.end(ldid, this.endPos - 1);
+
+	if (endPosChar < potentialEndPosChar)
+	    endPosChar = potentialEndPosChar;
+
+	log.trace("Matchposition: {}-{}", startPosChar, endPosChar);
+
+	// left context
+	if (leftTokenContext) {
+	    startOffsetChar = pto.start(ldid, startPos - this.leftContext);
+	}
+	else {
+	    startOffsetChar = startPosChar - this.leftContext;
+	};
+
+	// right context
+	if (rightTokenContext) {
+	    endOffsetChar = pto.end(ldid, this.endPos + this.rightContext - 1);
+	    log.trace("For endOffset {} pto returns {}", (this.endPos + this.rightContext - 1), endOffsetChar);
+	}
+	else {
+	    if (endPosChar == -1) {
+		endOffsetChar = -1;
+	    }
+	    else {
+		endOffsetChar = endPosChar + this.rightContext;
+	    };
+	};
+
+	// This can happen in case of non-token characters in the match and null offsets
+	if (startOffsetChar > startPosChar) {
+	    startOffsetChar = startPosChar;
+	}
+	else if (startOffsetChar < 0) {
+	    startOffsetChar = 0;
+	};
+
+	// No ... at the beginning
+	if (startOffsetChar == 0) {
+	    startMore = false;
+	};
+
+	if (endOffsetChar != -1 && endOffsetChar < endPosChar)
+	    endOffsetChar = endPosChar;
+
+
+	log.trace("Offsetposition {} till {} with contexts {} and {}", startOffsetChar, endOffsetChar, leftContext, rightContext);
+
+
+	if (endOffsetChar > -1 && endOffsetChar < this.getPrimaryDataLength()) {
+	    this.tempSnippet = this.getPrimaryData(startOffsetChar, endOffsetChar);
+	}
+	else {
+	    this.tempSnippet = this.getPrimaryData(startOffsetChar);
+	    endMore = false;
+	};
+
+	log.trace("Temporary snippet is {}", this.tempSnippet);
+
+        LinkedList<int[]> spans = new LinkedList<int[]>();
+
+	spans.add(new int[]{ startPosChar - startOffsetChar, endPosChar - startOffsetChar, -1, 0});
+
+	// highlights
+	if (this.highlight != null) {
+	    for (int[] highlight : this.highlight) {
+
+		int start = pto.start(ldid, highlight[0]) - startOffsetChar;
+		int end = pto.end(ldid, highlight[1]) - startOffsetChar;
+
+		if (start == -1 & end == -1)
+		    continue;
+
+		spans.add(new int[]{
+			start,
+			end,
+			highlight[2],
+			0 // Dummy value for later
+		    });
+	    };
+	};
+
+	return spans;
+    };
+
+};
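
In the search loop of KorapIndex, each KorapMatch is filled, the relevant positions are registered with PositionsToOffset, and processHighlight(pto) is called; afterwards the two snippet views can be read. A sketch, continuing the earlier index example (the result variable is assumed):

    KorapMatch km = kr.getMatch(0);
    String html     = km.getSnippetHTML();      // <span class="korap-match">...</span>
    String brackets = km.getSnippetBrackets();  // e.g. "... [match {1:highlight}] ..."
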
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/KorapQuery.java b/trunk/src/main/java/de/ids_mannheim/korap/KorapQuery.java
new file mode 100644
index 0000000..82b67bd
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/KorapQuery.java
@@ -0,0 +1,233 @@
+package de.ids_mannheim.korap;
+
+import org.apache.lucene.search.spans.SpanQuery;
+
+import de.ids_mannheim.korap.query.wrap.*;
+import org.apache.lucene.util.automaton.RegExp;
+
+import java.util.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * @author Nils Diewald
+ *
+ * KorapQuery implements a simple API for wrapping
+ * KorAP index specific query classes.
+ */
+public class KorapQuery {
+    private String field;
+
+    // Logger
+    private final static Logger log = LoggerFactory.getLogger(KorapQuery.class);
+
+    /**
+     * Constructs a new base object for query generation.
+     * @param field The specific index field for the query.
+     */
+    public KorapQuery (String field) {
+	this.field = field;
+    };
+
+
+    // SpanSegmentRegexQuery
+    /**
+     * Create a query object based on a regular expression.
+     * @param re The regular expression as a string.
+     */
+    public SpanRegexQueryWrapper re (String re) {
+	return new SpanRegexQueryWrapper(this.field, re, RegExp.ALL, false);
+    };
+
+    /**
+     * Create a query object based on a regular expression.
+     * @param re The regular expression as a string.
+     * @param flags The regular expression flags as an integer.
+     */
+    public SpanRegexQueryWrapper re (String re, int flags) {
+	return new SpanRegexQueryWrapper(this.field, re, flags, false);
+    };
+
+
+    /**
+     * Create a query object based on a regular expression.
+     * @param re The regular expression as a string.
+     * @param flags The regular expression flags as an integer.
+     * @param caseinsensitive A boolean value indicating case insensitivity.
+     */
+    public SpanRegexQueryWrapper re (String re, int flags, boolean caseinsensitive) {
+	return new SpanRegexQueryWrapper(this.field, re, flags, caseinsensitive);
+    };
+
+
+    /**
+     * Create a query object based on a regular expression.
+     * @param re The regular expression as a string.
+     * @param caseinsensitive A boolean value indicating case insensitivity.
+     */
+    public SpanRegexQueryWrapper re (String re, boolean caseinsensitive) {
+	return new SpanRegexQueryWrapper(this.field, re, RegExp.ALL, caseinsensitive);
+    };
+
+
+    // SpanSegmentQueries
+    /**
+     * Create a segment query object.
+     */
+    public SpanSegmentQueryWrapper seg () {
+	return new SpanSegmentQueryWrapper(this.field);
+    };
+
+
+    /**
+     * Create a segment query object.
+     * @param terms An array of terms the segment consists of.
+     */
+    public SpanSegmentQueryWrapper seg (SpanRegexQueryWrapper ... terms) {
+	SpanSegmentQueryWrapper ssq = new SpanSegmentQueryWrapper(this.field);
+	for (SpanRegexQueryWrapper t : terms) {
+	    ssq.with(t);
+	};
+	return ssq;
+    };
+
+    public SpanSegmentQueryWrapper seg (SpanAlterQueryWrapper ... terms) {
+	SpanSegmentQueryWrapper ssq = new SpanSegmentQueryWrapper(this.field);
+	for (SpanAlterQueryWrapper t : terms) {
+	    ssq.with(t);
+	};
+	return ssq;
+    };
+
+    public SpanSegmentQueryWrapper seg (String ... terms) {
+	SpanSegmentQueryWrapper ssq = new SpanSegmentQueryWrapper(this.field);
+	for (String t : terms) {
+	    ssq.with(t);
+	};
+	return ssq;
+    };
+
+    // SpanSegmentAlterQueries
+    /**
+     * Create a segment alternation query object.
+     * @param terms An array of alternative terms.
+     */
+    public SpanAlterQueryWrapper or (SpanQueryWrapperInterface ... terms) {
+	SpanAlterQueryWrapper ssaq = new SpanAlterQueryWrapper(this.field);
+	for (SpanQueryWrapperInterface t : terms) {
+	    ssaq.or(t);
+	};
+	return ssaq;
+    };
+
+    public SpanAlterQueryWrapper or (String ... terms) {
+	SpanAlterQueryWrapper ssaq = new SpanAlterQueryWrapper(this.field);
+	for (String t : terms) {
+	    ssaq.or(t);
+	};
+	return ssaq;
+    };
+
+
+    // SpanSegmentSequenceQueries
+    /**
+     * Create a sequence of segments query object.
+     */
+    public SpanSequenceQueryWrapper seq () {
+	return new SpanSequenceQueryWrapper(this.field);
+    };
+
+
+    /**
+     * Create a sequence of segments query object.
+     * @param terms An array of segment-defining terms.
+     */
+    public SpanSequenceQueryWrapper seq (SpanQueryWrapperInterface ... terms) {
+	SpanSequenceQueryWrapper sssq = new SpanSequenceQueryWrapper(this.field);
+	for (SpanQueryWrapperInterface t : terms) {
+	    sssq.append(t);
+	};
+	return sssq;
+    };
+
+
+    /**
+     * Create a sequence of segments query object.
+     * @param re A SpanRegexQueryWrapper starting the sequence.
+     */
+    public SpanSequenceQueryWrapper seq (SpanRegexQueryWrapper re) {
+	return new SpanSequenceQueryWrapper(this.field, re);
+    };
+
+
+    public SpanSequenceQueryWrapper seq (Object ... terms) {
+	SpanSequenceQueryWrapper ssq = new SpanSequenceQueryWrapper(this.field);
+	for (Object t : terms) {
+	    if (t instanceof SpanQueryWrapperInterface) {
+		ssq.append((SpanQueryWrapperInterface) t);
+	    }
+	    else if (t instanceof SpanRegexQueryWrapper) {
+		ssq.append((SpanRegexQueryWrapper) t);
+	    }
+	    else {
+		log.error("{} is not an acceptable parameter for seq()", t.getClass());
+		return ssq;
+	    };
+	};
+	return ssq;
+    };
+
+    public SpanElementQueryWrapper tag (String element) {
+	return new SpanElementQueryWrapper(this.field, element);
+    };
+
+    /**
+     * Create a wrapping within query object.
+     * @param element A SpanQuery.
+     * @param embedded A SpanQuery that is wrapped in the element.
+     */
+    public SpanWithinQueryWrapper within (SpanQueryWrapperInterface element,
+					  SpanQueryWrapperInterface embedded) {
+	return new SpanWithinQueryWrapper(element, embedded);
+    };
+
+
+
+    // Class
+    public SpanClassQueryWrapper _ (byte number, SpanQueryWrapperInterface element) {
+	return new SpanClassQueryWrapper(element, number);
+    };
+
+    public SpanClassQueryWrapper _ (int number, SpanQueryWrapperInterface element) {
+	return new SpanClassQueryWrapper(element, number);
+    };
+
+    public SpanClassQueryWrapper _ (short number, SpanQueryWrapperInterface element) {
+	return new SpanClassQueryWrapper(element, number);
+    };
+
+    public SpanClassQueryWrapper _ (SpanQueryWrapperInterface element) {
+	return new SpanClassQueryWrapper(element);
+    };
+
+    // MatchModify
+    public SpanMatchModifyQueryWrapper shrink (byte number, SpanQueryWrapperInterface element) {
+	return new SpanMatchModifyQueryWrapper(element, number);
+    };
+
+    public SpanMatchModifyQueryWrapper shrink (int number, SpanQueryWrapperInterface element) {
+	return new SpanMatchModifyQueryWrapper(element, number);
+    };
+
+    public SpanMatchModifyQueryWrapper shrink (short number, SpanQueryWrapperInterface element) {
+	return new SpanMatchModifyQueryWrapper(element, number);
+    };
+
+    public SpanMatchModifyQueryWrapper shrink (SpanQueryWrapperInterface element) {
+	return new SpanMatchModifyQueryWrapper(element);
+    };
+
+    // split
+
+};
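
A sketch of the query builder in use, assuming the wrapper classes implement SpanQueryWrapperInterface; the field name and term surfaces are made up:

    KorapQuery kq = new KorapQuery("tokens");

    // Sequence of a plain segment, a class-1 wrapped regex, and an alternation
    SpanSequenceQueryWrapper seq = kq.seq(
        kq.seg("s:der"),
        kq._(1, kq.re("s:Baum(es)?")),
        kq.or("s:steht", "s:stand")
    );

    // Match only within sentence elements
    SpanWithinQueryWrapper within = kq.within(kq.tag("s"), seq);
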
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/KorapResult.java b/trunk/src/main/java/de/ids_mannheim/korap/KorapResult.java
new file mode 100644
index 0000000..501092d
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/KorapResult.java
@@ -0,0 +1,159 @@
+package de.ids_mannheim.korap;
+
+import java.util.*;
+import de.ids_mannheim.korap.KorapMatch;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.*;
+import com.fasterxml.jackson.annotation.*;
+
+public class KorapResult {
+    ObjectMapper mapper = new ObjectMapper();
+
+    public static final short ITEMS_PER_PAGE = 25;
+    private String query;
+
+    private List<KorapMatch> matches;
+
+    private int totalResults = 0;
+    private int startIndex = 0;
+
+    private short itemsPerPage = ITEMS_PER_PAGE;
+    private short leftContextOffset = 6, rightContextOffset = 6;
+    private boolean leftTokenContext, rightTokenContext;
+
+    private String benchmarkSearchResults = "", benchmarkHitCounter = "0";
+    private String error = null;
+
+    // Logger
+    private final static Logger log = LoggerFactory.getLogger(KorapResult.class);
+
+    public KorapResult (String query,
+			int startIndex,
+			short itemsPerPage,
+			boolean leftTokenContext,
+			short leftContextOffset,
+			boolean rightTokenContext,
+			short rightContextOffset) {
+
+	mapper.enable(SerializationFeature.INDENT_OUTPUT);
+	// mapper.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS);
+	mapper.disable(SerializationFeature.WRITE_NULL_MAP_VALUES);
+
+	this.matches = new ArrayList<>();
+	this.query = query;
+	this.startIndex = startIndex;
+	this.itemsPerPage = (itemsPerPage > 50 || itemsPerPage < 1) ? ITEMS_PER_PAGE : itemsPerPage;
+	this.leftContextOffset = leftContextOffset;
+	this.rightContextOffset = rightContextOffset;
+
+	this.leftTokenContext = leftTokenContext;
+	this.rightTokenContext = rightTokenContext;
+    };
+
+    public void add (KorapMatch km) {
+	this.matches.add(km);
+    };
+
+    public void setTotalResults (int i) {
+	this.totalResults = i;
+    };
+
+    public int getTotalResults () {
+	return this.totalResults;
+    };
+
+    @Deprecated
+    public int totalResults () {
+	return this.totalResults;
+    };
+
+    public short getItemsPerPage () {
+	return this.itemsPerPage;
+    };
+
+    @Deprecated
+    public short itemsPerPage () {
+	return this.itemsPerPage;
+    };
+
+    public String getError () {
+	return this.error;
+    };
+
+    public void setError (String msg) {
+	this.error = msg;
+    };
+
+    public void setBenchmarkSearchResults (long t1, long t2) {
+	this.benchmarkSearchResults = (t2 - t1) * 1e-6 + " ms";
+    };
+
+    public String getBenchmarkSearchResults () {
+	return this.benchmarkSearchResults;
+    };
+
+    public void setBenchmarkHitCounter (long t1, long t2) {
+	this.benchmarkHitCounter = (t2 - t1) * 1e-6 + " ms";
+    };
+
+    public String getBenchmarkHitCounter () {
+	return this.benchmarkHitCounter;
+    };
+
+    public String getQuery () {
+	return this.query;
+    };
+
+    public KorapMatch getMatch (int index) {
+	return this.matches.get(index);
+    };
+
+    public List<KorapMatch> getMatches () {
+	return this.matches;
+    };
+
+    @Deprecated
+    public KorapMatch match (int index) {
+	return this.matches.get(index);
+    };
+
+    public int getStartIndex () {
+	return startIndex;
+    };
+
+    public String toJSON () {
+	
+	//	ObjectNode json = (ObjectNode) mapper.createObjectNode();
+	// ObjectNode json = (ObjectNode) mapper.treeAsTokens(this);
+
+	ObjectNode json =  (ObjectNode) mapper.valueToTree(this);
+
+	ArrayNode leftContext = mapper.createArrayNode();
+	leftContext.add(this.leftTokenContext ? "token" : "char");
+	leftContext.add(this.leftContextOffset);
+
+	ArrayNode rightContext = mapper.createArrayNode();
+	rightContext.add(this.rightTokenContext ? "token" : "char");
+	rightContext.add(this.rightContextOffset);
+
+	ObjectNode context = mapper.createObjectNode();
+	context.put("left", leftContext);
+	context.put("right", rightContext);
+	json.put("context", context);
+
+	try {
+	    return mapper.writeValueAsString(json); // mapper.writeValueAsString(treeMapper);
+	}
+	catch (Exception e) {
+	    log.warn(e.getLocalizedMessage());
+	};
+
+	return "{}";
+    };
+};
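
The result object is designed to be serialized with Jackson; toJSON() additionally injects the context description. A sketch, continuing the earlier index example:

    KorapResult kr = ki.search(sq, (short) 25);
    int hits = kr.getTotalResults();
    String json = kr.toJSON();   // includes "context": {"left": [...], "right": [...]}
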
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTerm.java b/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTerm.java
new file mode 100644
index 0000000..9c9e306
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTerm.java
@@ -0,0 +1,290 @@
+package de.ids_mannheim.korap.analysis;
+
+import static de.ids_mannheim.korap.util.KorapArray.*;
+import org.apache.lucene.util.BytesRef;
+import java.nio.ByteBuffer;
+import java.util.*;
+
+
+/**
+ * @author Nils Diewald
+ * @version 0.2
+ *
+ * MultiTerm represents a term in a MultiTermToken.
+ */
+public class MultiTerm {
+    public int start, end = 0;
+    public String term = null;
+    public Integer posIncr = 1;
+    public boolean storeOffsets = false;
+    public BytesRef payload = null;
+
+    /**
+     * The constructor.
+     *
+     * @param term The term surface.
+              Offsets can be appended after a hash sign as a dash-separated pair of integers,
+	      payloads can be appended following a dollar sign.
+	      Payloads can be typed as a byte (b), a short (s), an integer (i), or a long (l)
+	      in leading angle brackets. All other payloads are treated as UTF-8
+	      character sequences.
+	      
+	      Examples:
+	      MultiTerm test = new MultiTerm("test");
+	      MultiTerm test = new MultiTerm("test#0-4");
+	      MultiTerm test = new MultiTerm("test#0-4$Example");
+	      MultiTerm test = new MultiTerm("test#0-4$&lt;i&gt;1278");
+     */
+    public MultiTerm (String term) {
+	/*
+	this.start = this.end = 0;
+	this.storeOffsets = false;
+	this.payload = null;
+	*/
+	_fromString(term);
+    };
+
+    /**
+     * The constructor with a separated prefix.
+     * new MultiTerm('a', "bcd") is equivalent to
+     * new MultiTerm("a:bcd");
+     *
+     * @param prefix A special prefix for the term.
+     * @param term The term surface.
+     *
+     * @see #MultiTerm(String)
+     */
+    public MultiTerm (char prefix, String term) {
+	StringBuilder sb = new StringBuilder();
+	/*
+	this.start = this.end = 0;
+	this.storeOffsets = false;
+	this.payload = null;
+	*/
+	sb.append(prefix).append(':').append(term);
+	_fromString(sb.toString());
+    };
+
+    public void term (String term) {
+	this.term = term;
+    };
+
+    public String term () {
+	return this.term;
+    };
+
+    /**
+     * The constructor.
+     */
+    public MultiTerm () {
+	this.term = "";
+	/*
+	this.start = this.end = 0;
+	this.storeOffsets = false;
+	this.payload = null;
+	*/
+    };
+
+    public void payload (Byte pl) {
+	this.payload = new BytesRef( ByteBuffer.allocate(1).put(pl).array());
+    };
+
+    public void payload (short pl) {
+	this.payload = new BytesRef( ByteBuffer.allocate(2).putShort(pl).array());
+    };
+
+    public void payload (int pl) {
+	this.payload = new BytesRef( ByteBuffer.allocate(4).putInt(pl).array());
+    };
+
+    public void payload (long pl) {
+	this.payload = new BytesRef( ByteBuffer.allocate(8).putLong(pl).array());
+    };
+
+    public void payload (String pl) {
+	this.payload = new BytesRef(pl);
+    };
+
+    public void payload (byte[] pl) {
+	this.payload = new BytesRef(pl);
+    };
+
+    public void payload (BytesRef pl) {
+	this.payload = pl;
+    };
+
+    public BytesRef payload () {
+	return this.payload;
+    };
+
+    public void start (int value) {
+	this.start = value;
+    };
+
+    public int start () {
+	return this.start;
+    };
+
+    public void end (int value) {
+	this.end = value;
+    };
+
+    public int end () {
+	return this.end;
+    };
+
+    public boolean storeOffsets () {
+	return this.storeOffsets;
+    };
+
+    public void storeOffsets (boolean value) {
+	this.storeOffsets = value;
+    };
+
+    private void _fromString (String term) {
+	String[] termSurface = term.split("\\$", 2);
+
+	// Payload is given
+	if (termSurface.length == 2) {
+	    String payloadStr = termSurface[1];
+
+	    // Payload has a type
+	    if (payloadStr.charAt(0) == '<' && payloadStr.charAt(2) == '>') {
+		ByteBuffer bb = ByteBuffer.allocate(8);
+
+		String[] pls = payloadStr.split("(?=<)|(?<=>)");
+		int l = 0;
+
+		for (int i = 1; i < pls.length;) {
+
+		    // Resize the buffer
+		    if ((bb.capacity() - l) < 8) {
+			bb = ByteBuffer.allocate(bb.capacity() + 8).put(bb.array());
+			bb.position(l);
+		    };
+		    switch (pls[i]) {
+		    case "<b>": // byte
+			bb.put(Byte.parseByte(pls[i+1]));
+			l++;
+			break;
+		    case "<s>":
+			bb.putShort(Short.parseShort(pls[i+1]));
+			l+=2;
+			break;
+		    case "<i>":
+			bb.putInt(Integer.parseInt(pls[i+1]));
+			l+=4;
+			break;
+		    case "<l>":
+			bb.putLong(Long.parseLong(pls[i+1]));
+			l+=8;
+			break;
+		    };
+		    i+=2;
+		};
+		byte[] bytes = new byte[l];
+		System.arraycopy(bb.array(), 0, bytes, 0, l);
+		this.payload = new BytesRef(bytes);
+
+
+		/*
+		payloadStr = payloadStr.substring(3, payloadStr.length());
+		switch (type) {
+		case 'b':  // byte
+
+		    System.err.println("bbb");
+		    payloadBytes = ByteBuffer.allocate(1).put(new Byte(payloadStr)).array();
+		    break;
+		case 's':  // short
+		    payloadBytes = ByteBuffer.allocate(2).putShort(
+								   Short.parseShort(payloadStr)
+								   ).array();
+		    break;
+		case 'i': // integer
+		    payloadBytes = ByteBuffer.allocate(4).putInt(
+								 Integer.parseInt(payloadStr)
+								 ).array();
+		    break;
+		case 'l': // long
+		    payloadBytes = ByteBuffer.allocate(8).putLong(
+								  Long.parseLong(payloadStr)
+								  ).array();
+		    break;
+		};
+		TODO:
+		case '?': // arbitrary
+		    payloadStr = 
+		*/
+	    }
+
+	    // Payload is a string
+	    else {
+		this.payload = new BytesRef(payloadStr);
+	    };
+	};
+	String[] stringOffset = termSurface[0].split("\\#", 2);
+	if (stringOffset.length == 2) {
+	    String[] offset = stringOffset[1].split("\\-", 2);
+
+	    if (offset.length == 2 && offset[0].length() > 0) {
+		this.start = Integer.parseInt(offset[0]);
+		this.end   = Integer.parseInt(offset[1]);
+	    /*
+	    }
+	    else {
+		this.storeOffsets(false);
+	    */
+	    };
+	};
+	this.term = stringOffset[0];
+    };
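+    /*
+     * Worked example for the serialization format parsed above (a sketch;
+     * the surface value is invented):
+     *
+     *   new MultiTerm("baum#0-4$<i>1278")
+     *     term    -> "baum"
+     *     start   -> 0, end -> 4
+     *     payload -> the integer 1278 as four big-endian bytes
+     *
+     * Typed payload markers are <b> (byte), <s> (short), <i> (int) and
+     * <l> (long); a payload without a marker is stored as a UTF-8 string.
+     */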
+
+
+    /**
+     * Represent the MultiTerm as a string.
+     * Offsets are attached after a hash sign;
+     * payloads are attached after a dollar sign.
+     * All payloads are written as UTF-8 character sequences.
+     *
+     * @see #toStringShort().
+     */
+    public String toString () {
+	StringBuilder sb = new StringBuilder(this.term);
+	if (this.start != this.end) {
+	    sb.append('#').append(this.start).append('-').append(this.end);
+	/*
+	}
+	else if (!this.storeOffsets()) {
+	    sb.append("#-");
+	*/
+	};
+
+	if (this.payload != null) {
+	    sb.append('$');
+	    try {
+		sb.append(this.payload.utf8ToString());
+	    }
+	    catch (AssertionError e) {
+		sb.append("<?>").append(join(',', this.payload.toString().split(" ")));
+	    };
+	};
+
+	return sb.toString();
+    };
+
+    /**
+     * Represent the MultiTerm as a string.
+     * Payloads are attached following a dollar sign.
+     * All payloads are written as UTF-8 character sequences.
+     * Offsets are omitted.
+     * 
+     * @see #toString().
+     */
+    public String toStringShort () {
+	StringBuilder sb = new StringBuilder(this.term);
+	if (this.payload != null) {
+	    sb.append('$').append(this.payload.utf8ToString());
+	};
+	return sb.toString();
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTermToken.java b/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTermToken.java
new file mode 100644
index 0000000..ff70996
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTermToken.java
@@ -0,0 +1,122 @@
+package de.ids_mannheim.korap.analysis;
+
+import de.ids_mannheim.korap.analysis.MultiTerm;
+import java.util.*;
+
+/*
+  Todo:
+  - Always write offsets to payloads!
+  - Offsets can be overwritten!
+  - Check that terms are not ""!!!
+*/
+
+/**
+ * @author Nils Diewald
+ *
+ * MultiTermToken represents a single token position (segment) in a MultiTermTokenStream and may hold several terms.
+ */
+public class MultiTermToken {
+    public int start, end = 0;
+    public List<MultiTerm> terms;
+
+    public MultiTermToken (MultiTerm term, MultiTerm ... moreTerms) {
+	this.terms = new ArrayList<MultiTerm>();
+
+	if (term.start != term.end) {
+	    this.start = term.start;
+	    this.end = term.end;
+	};
+
+	term.posIncr = 1;
+	terms.add( term );
+
+	// Further elements on same position
+	for (int i = 0; i < moreTerms.length; i++) {
+	    term = moreTerms[i];
+	    term.posIncr = 0;
+	    terms.add(term);
+	};
+    };
+
+    public MultiTermToken (char prefix, String surface) {
+	this.terms = new ArrayList<MultiTerm>();
+
+	MultiTerm term = new MultiTerm(prefix, surface);
+
+	if (term.start != term.end) {
+	    this.start = term.start;
+	    this.end = term.end;
+	};
+
+	// First word element
+	term.posIncr = 1;
+	terms.add( term );
+    };
+
+
+    public MultiTermToken (String surface, String ... moreTerms) {
+	this.terms = new ArrayList<MultiTerm>();
+
+	MultiTerm term = new MultiTerm(surface);
+
+	if (term.start != term.end) {
+	    this.start = term.start;
+	    this.end = term.end;
+	};
+
+	// First word element
+	term.posIncr = 1;
+	terms.add( term );
+
+
+	// Further elements on same position
+	for (int i = 0; i < moreTerms.length; i++) {
+
+	    term = new MultiTerm( moreTerms[i] );
+	    term.posIncr = 0;
+	    terms.add(term);
+	};
+    };
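+    /*
+     * Usage sketch (prefixes and surfaces are invented for illustration):
+     *
+     *   MultiTermToken mtt = new MultiTermToken("s:Der", "i:der", "p:DET");
+     *   mtt.add("l:die");
+     *   mtt.offset(0, 3);
+     *   // mtt.toString() -> "[(0-3)s:Der|i:der|p:DET|l:die]"
+     *
+     * All terms of one MultiTermToken share a single token position; only
+     * the first one gets a position increment of 1.
+     */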
+
+    public void add (MultiTerm mt) {
+	terms.add(mt);
+    };
+
+    public void add (String term) {
+	MultiTerm mt = new MultiTerm(term);
+	mt.posIncr = 0;
+	terms.add(mt);
+    };
+
+    public void add (char prefix, String term) {
+	MultiTerm mt = new MultiTerm(prefix, term);
+	mt.posIncr = 0;
+	terms.add(mt);
+    };
+
+    public void offset (int start, int end) {
+	this.start = start;
+	this.end   = end;
+    };
+
+    public String toString () {
+	StringBuffer sb = new StringBuffer();
+
+	sb.append('[');
+	if (this.start != this.end) {
+	    sb.append('(').append(this.start).append('-').append(this.end).append(')');
+	};
+
+	int i = 0;
+	for (; i < this.terms.size() - 1; i++) {
+	    sb.append(this.terms.get(i).toStringShort()).append('|');
+	};
+	sb.append(this.terms.get(i).toStringShort()).append(']');
+
+	return sb.toString();
+    };
+
+    public int size () {
+	return this.terms.size();
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTermTokenStream.java b/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTermTokenStream.java
new file mode 100644
index 0000000..f0a155a
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/analysis/MultiTermTokenStream.java
@@ -0,0 +1,226 @@
+package de.ids_mannheim.korap.analysis;
+
+import de.ids_mannheim.korap.analysis.MultiTerm;
+import de.ids_mannheim.korap.analysis.MultiTermToken;
+import static de.ids_mannheim.korap.util.KorapByte.*;
+import org.apache.lucene.util.BytesRef;
+
+import java.util.*;
+import java.util.regex.*;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+// import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/*
+  Todo:
+  - Do not use offsetAttr!
+#  - Payload is [4ByteStartOffset][14BitEndOffset-startOffset][1BitBooleanIfSpan][1BitBooleanIfOpen]
+  - Payload is [4ByteOffsetStart][4ByteOffsetStart]
+*/
+
+/**
+ * @author Nils Diewald
+ *
+ * MultiTermTokenStream extends Lucene's TokenStream class to work with MultiTermTokens.
+ *
+ * @see org.apache.lucene.analysis.TokenStream
+ */
+public class MultiTermTokenStream extends TokenStream {
+    private CharTermAttribute charTermAttr;
+    //     private OffsetAttribute offsetAttr;
+    private PositionIncrementAttribute posIncrAttr;
+    private PayloadAttribute payloadAttr;
+
+    private static Pattern pattern = Pattern.compile("\\[(\\(([0-9]+)-([0-9]+)\\))?([^\\]]+?)\\]");
+
+    private List<MultiTermToken> multiTermTokens;
+    private int mttIndex = 0;
+    private int mtIndex  = 0;
+    //    private TokenTextGenerator ttGen = new TokenTextGenerator();
+
+    private final Logger log = LoggerFactory.getLogger(MultiTermTokenStream.class);
+
+    public MultiTermTokenStream () {
+	//	this.offsetAttr   = this.addAttribute(OffsetAttribute.class);
+        this.charTermAttr = this.addAttribute(CharTermAttribute.class);
+        this.posIncrAttr  = this.addAttribute(PositionIncrementAttribute.class);
+	this.payloadAttr = this.addAttribute(PayloadAttribute.class);
+	this.multiTermTokens  = new ArrayList<MultiTermToken>();
+
+	/*
+        if (!indexTokens.isEmpty()){
+            indexTokens.get(indexTokens.size() - 1).setIncrement(false);
+        };
+	*/
+    };
+
+    public MultiTermTokenStream (String stream) {
+	this();
+
+	int pos = 0;
+
+	Matcher matcher = pattern.matcher(stream);
+
+	while (matcher.find()) {
+
+	    String[] seg = matcher.group(4).split("\\|");
+	    MultiTermToken mtt = new MultiTermToken( seg[0] );
+
+	    if (matcher.group(2) != null)
+		mtt.start = Integer.parseInt(matcher.group(2));
+
+	    if (matcher.group(3) != null)
+		mtt.end = Integer.parseInt(matcher.group(3));
+
+	    for (int i = 1; i < seg.length; i++)
+		mtt.add(seg[i]);
+
+	    this.addMultiTermToken(mtt);
+	};
+    };
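+    /*
+     * Sketch of the bracket notation parsed above (token values invented):
+     *
+     *   new MultiTermTokenStream(
+     *       "[(0-3)s:Der|i:der|p:DET][(4-8)s:Baum|i:baum|p:NN]"
+     *   );
+     *
+     * Each [...] group becomes one MultiTermToken, the optional (start-end)
+     * part sets its offsets, and the |-separated entries share one token
+     * position.
+     */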
+
+    public void addMultiTermToken (MultiTermToken mtt) {
+	this.multiTermTokens.add(mtt);
+    };
+
+    public void addMultiTermToken (MultiTerm term, MultiTerm ... moreTerms) {
+	this.addMultiTermToken(new MultiTermToken(term, moreTerms));
+    };
+
+    public void addMultiTermToken (char prefix, String surface) {
+	this.addMultiTermToken(new MultiTermToken(prefix, surface));
+    };
+
+    public void addMultiTermToken (String surface, String ... moreTerms) {
+	this.addMultiTermToken(new MultiTermToken(surface, moreTerms));
+    };
+
+    public void addMeta (String key, String value) {
+	MultiTerm mt = new MultiTerm('-', key);
+	//	mt.storeOffsets(false);
+	mt.payload(value);
+	this.multiTermTokens.get(0).add(mt);
+    };
+
+    public void addMeta (String key, byte[] value) {
+	MultiTerm mt = new MultiTerm('-', key);
+	//	mt.storeOffsets(false);
+	mt.payload(value);
+	this.multiTermTokens.get(0).add(mt);
+    };
+
+
+    public void addMeta (String key, short value) {
+	MultiTerm mt = new MultiTerm('-', key);
+	//	mt.storeOffsets(false);
+	mt.payload(value);
+	this.multiTermTokens.get(0).add(mt);
+    };
+
+    public void addMeta (String key, long value) {
+	MultiTerm mt = new MultiTerm('-', key);
+	//	mt.storeOffsets(false);
+	mt.payload(value);
+	this.multiTermTokens.get(0).add(mt);
+    };
+
+    public void addMeta (String key, int value) {
+	MultiTerm mt = new MultiTerm('-', key);
+	//	mt.storeOffsets(false);
+	mt.payload(value);
+	this.multiTermTokens.get(0).add(mt);
+    };
+
+    @Override
+    public final boolean incrementToken() throws IOException {
+	this.payloadAttr.setPayload(null);
+
+	if (this.multiTermTokens.size() == this.mttIndex) {
+	    reset();
+	    return false;
+	};
+
+	MultiTermToken mtt = this.multiTermTokens.get( this.mttIndex );
+
+	if (mtt.terms.size() == this.mtIndex) {
+	    this.mtIndex = 0;
+	    this.mttIndex++;
+	    if (this.multiTermTokens.size() == this.mttIndex) {
+		reset();
+		return false;
+	    }
+	    else {
+		mtt = this.multiTermTokens.get( this.mttIndex );
+	    };
+	};
+
+	MultiTerm mt = mtt.terms.get(this.mtIndex);
+
+	// Get the current index token
+
+	// Set the relative position to the former term
+        posIncrAttr.setPositionIncrement( mt.posIncr );
+        charTermAttr.setEmpty();
+	charTermAttr.append( mt.term );
+
+	BytesRef payload = new BytesRef();
+	if (mt.start != mt.end) {
+	    log.trace("MultiTerm with payload offset: {}-{}", mt.start, mt.end);
+	    payload.append(new BytesRef(int2byte(mt.start)));
+	    payload.append(new BytesRef(int2byte(mt.end)));
+	    /*
+	      }
+	      else if (mtt.start != mtt.end) {
+	      payload.append(new BytesRef(int2byte(mtt.start)));
+	      payload.append(new BytesRef(int2byte(mtt.end)));
+	    */
+	};
+
+	// Payload
+	if (mt.payload != null) {
+	    payload.append(mt.payload());
+	    log.trace("Create payload[1] {}", payload.toString());
+	};
+
+	if (payload.length > 0) {
+	    log.trace("Set payload[2] {}", payload.toString());
+	    payloadAttr.setPayload(payload);
+	};
+
+	if (log.isTraceEnabled()) {
+	    StringBuilder sb = new StringBuilder("Index: [");
+	    sb.append(mt.term);
+	    if (payload.length > 0)
+		sb.append('$').append(payload.toString());
+	    sb.append(']');
+	    sb.append(" with increment ").append(mt.posIncr);
+	    log.trace(sb.toString());
+	};
+
+	this.mtIndex++;
+
+        return true;
+    };
+
+    public String toString () {
+	StringBuffer sb = new StringBuffer();
+	for (MultiTermToken mtt : this.multiTermTokens) {
+	    sb.append( mtt.toString() );
+	};
+	return sb.toString();
+    };
+
+    @Override
+    public void reset() {
+	this.mttIndex = 0;
+	this.mtIndex = 0;
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/document/KorapPrimaryData.java b/trunk/src/main/java/de/ids_mannheim/korap/document/KorapPrimaryData.java
new file mode 100644
index 0000000..1ee7022
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/document/KorapPrimaryData.java
@@ -0,0 +1,25 @@
+package de.ids_mannheim.korap.document;
+
+public class KorapPrimaryData {
+    private String primary;
+
+    public KorapPrimaryData (String text) {
+	this.primary = text;
+    };
+
+    public String substring (int startOffset) {
+	return this.primary.substring(startOffset);
+    };
+
+    public String substring (int startOffset, int endOffset) {
+	return this.primary.substring(startOffset, endOffset);
+    };
+
+    public String toString () {
+	return this.primary;
+    };
+
+    public int length () {
+	return this.primary.length();
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/filter/BooleanFilter.java b/trunk/src/main/java/de/ids_mannheim/korap/filter/BooleanFilter.java
new file mode 100644
index 0000000..53ffd93
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/filter/BooleanFilter.java
@@ -0,0 +1,73 @@
+package de.ids_mannheim.korap.filter;
+
+import java.util.*;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.RegexpQuery;
+import org.apache.lucene.index.Term;
+
+/*
+  Todo: !not
+*/
+
+/**
+ * @author Nils Diewald
+ *
+ * BooleanFilter implements a simple API for boolean operations
+ * on constraints for KorapFilter.
+ */
+public class BooleanFilter {
+    private String type;
+    private Query query;
+
+    public BooleanFilter (String type, Query query) {
+	this.type = type;
+	this.query = query;
+    };
+
+    public BooleanFilter or (String ... values) {
+	BooleanQuery bool = new BooleanQuery();
+	bool.add(this.query, BooleanClause.Occur.SHOULD);
+	for (String val : values) {
+	    bool.add(new TermQuery(new Term(this.type, val)), BooleanClause.Occur.SHOULD);
+	};
+	this.query = bool;
+	return this;
+    };
+
+    public BooleanFilter or (RegexFilter value) {
+	BooleanQuery bool = new BooleanQuery();
+	bool.add(this.query, BooleanClause.Occur.SHOULD);
+	bool.add(value.toQuery(this.type), BooleanClause.Occur.SHOULD);
+	this.query = bool;
+	return this;
+    };
+
+    
+    public BooleanFilter and (String value) {
+	BooleanQuery bool = new BooleanQuery();
+	bool.add(this.query, BooleanClause.Occur.MUST);
+	bool.add(new TermQuery(new Term(this.type, value)), BooleanClause.Occur.MUST);
+	this.query = bool;
+	return this;
+    };
+
+    public BooleanFilter and (RegexFilter value) {
+	BooleanQuery bool = new BooleanQuery();
+	bool.add(this.query, BooleanClause.Occur.MUST);
+	bool.add(value.toQuery(this.type), BooleanClause.Occur.MUST);
+	this.query = bool;
+	return this;
+    };
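+    /*
+     * Chaining sketch (field name and values are invented):
+     *
+     *   Query q = new BooleanFilter(
+     *           "textClass",
+     *           new TermQuery(new Term("textClass", "politik")))
+     *       .or("wirtschaft", "sport")
+     *       .and(new RegexFilter("zeitung.*"))
+     *       .toQuery();
+     */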
+    
+    public Query toQuery () {
+	return this.query;
+    };
+
+    public String toString () {
+	return this.query.toString();
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/filter/RegexFilter.java b/trunk/src/main/java/de/ids_mannheim/korap/filter/RegexFilter.java
new file mode 100644
index 0000000..32aca72
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/filter/RegexFilter.java
@@ -0,0 +1,27 @@
+package de.ids_mannheim.korap.filter;
+
+import java.util.*;
+
+import org.apache.lucene.search.RegexpQuery;
+import org.apache.lucene.index.Term;
+
+/**
+ * @author Nils Diewald
+ *
+ * RegexFilter implements a helper object for regular expressions used in KorapFilter
+ * constraints.
+ */
+
+public class RegexFilter {
+    String regex;
+
+    public RegexFilter (String regex) {
+	this.regex = regex;
+    };
+
+    public RegexpQuery toQuery (String field) {
+	return new RegexpQuery(
+            new Term(field, this.regex)
+	);
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/index/FieldDocument.java b/trunk/src/main/java/de/ids_mannheim/korap/index/FieldDocument.java
new file mode 100644
index 0000000..4539b9e
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/index/FieldDocument.java
@@ -0,0 +1,212 @@
+package de.ids_mannheim.korap.index;
+
+import org.apache.lucene.document.Document;
+import de.ids_mannheim.korap.analysis.MultiTermTokenStream;
+import de.ids_mannheim.korap.analysis.MultiTermToken;
+import de.ids_mannheim.korap.KorapDocument;
+import de.ids_mannheim.korap.util.KorapDate;
+
+import com.fasterxml.jackson.annotation.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+
+import java.util.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/*
+Todo: Store primary data at base/cons field.
+All other Termvectors should have no stored field!
+*/
+
+/**
+ * @author Nils Diewald
+ *
+ * FieldDocument implements a simple API for creating documents that can be stored in a KorapIndex.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class FieldDocument extends KorapDocument {
+    ObjectMapper mapper = new ObjectMapper();
+
+    private final static Logger log = LoggerFactory.getLogger(FieldDocument.class);
+
+    public Document doc = new Document();
+
+    private FieldType tvField = new FieldType(TextField.TYPE_STORED);
+    private FieldType tvNoField = new FieldType(TextField.TYPE_NOT_STORED);
+
+    //    private HashMap<String, MultiTermTokenStream> termFields;
+
+    {
+	tvField.setStoreTermVectors(true);
+	tvField.setStoreTermVectorPositions(true);
+	tvField.setStoreTermVectorPayloads(true);
+
+	tvNoField.setStoreTermVectors(true);
+	tvNoField.setStoreTermVectorPositions(true);
+	tvNoField.setStoreTermVectorPayloads(true);
+	//	termFields = new HashMap<String, MultiTermTokenStream>();
+    }
+
+    // see http://www.cowtowncoder.com/blog/archives/2011/07/entry_457.html
+
+    /*
+    @JsonCreator
+    public FieldDocument(Map<String,Object> props) {
+      this.id = (String) props.get("id");
+      this.title = (String) props.get("title");
+    };
+
+    public FieldDocument (String json) {
+
+	
+	my $primary = ->{primary}
+	corpus_id, pub_date, id, text_class (Array), author (Array), title, sub_title, pub_place
+
+	foreach (->{fields}) {
+	    foreach (data) {
+		foreach () {
+		}
+	    }
+	};
+created timestamp
+last_modified timestamp or KorapDate
+	
+    };
+*/
+
+    public void addInt (String key, int value) {
+	doc.add(new IntField(key, value, Field.Store.YES));
+    };
+
+    public void addInt (String key, String value) {
+	this.addInt(key, Integer.parseInt(value));
+    };
+
+    public void addText (String key, String value) {
+	doc.add(new TextField(key, value, Field.Store.YES));
+    };
+
+    public void addString (String key, String value) {
+	doc.add(new StringField(key, value, Field.Store.YES));
+    };
+
+    public void addTV (String key, String value, String tsString) {
+	this.addTV(key, value, new MultiTermTokenStream(tsString));
+    };
+
+    public void addTV (String key, String tsString) {
+	this.addTV(key, new MultiTermTokenStream(tsString));
+    };
+
+    public void addTV (String key, String value, MultiTermTokenStream ts) {
+	Field textField = new Field( key, value, tvField );
+	textField.setTokenStream( ts );
+	doc.add(textField);
+    };
+
+    public void addTV (String key, MultiTermTokenStream ts) {
+	Field textField = new Field( key, ts, tvNoField );
+	doc.add(textField);
+    };
+
+    public String toString () {
+	return doc.toString();
+    };
+
+    public MultiTermTokenStream newMultiTermTokenStream (String ts) {
+	return new MultiTermTokenStream(ts);
+    };
+
+    public MultiTermTokenStream newMultiTermTokenStream () {
+	return new MultiTermTokenStream();
+    };
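+    /*
+     * Indexing sketch (field names and token values are invented):
+     *
+     *   FieldDocument fd = new FieldDocument();
+     *   fd.setCorpusID("WPD");
+     *   fd.setTitle("Ein Beispiel");
+     *   fd.addTV("tokens",
+     *            "Der Baum",
+     *            "[(0-3)s:Der|i:der][(4-8)s:Baum|i:baum]");
+     *
+     * The underlying Lucene document (fd.doc) can then be added to an
+     * index writer.
+     */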
+
+    public void setFields (ArrayList<Map<String,Object>> fields) {
+
+	Map<String,Object> primary = fields.remove(0);
+	this.setPrimaryData((String) primary.get("primaryData"));
+
+	for (Map<String,Object> field : fields) {
+	    String fieldName = (String) field.get("name");
+	    MultiTermTokenStream mtts = this.newMultiTermTokenStream();
+
+	    for (ArrayList<String> token : (ArrayList<ArrayList<String>>) field.get("data")) {
+
+		MultiTermToken mtt = new MultiTermToken(token.remove(0));
+
+		for (String term : token) {
+		    mtt.add(term);
+		};
+
+		mtts.addMultiTermToken(mtt);
+	    };
+
+	    this.addTV(fieldName, this.getPrimaryData(), mtts);
+	};
+    };
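+    /*
+     * Expected shape of the fields argument, as read by setFields above
+     * (a sketch, not a fixed schema):
+     *
+     *   [
+     *     { "primaryData": "Der Baum" },
+     *     { "name": "tokens",
+     *       "data": [ ["s:Der", "i:der"], ["s:Baum", "i:baum"] ] }
+     *   ]
+     *
+     * The first entry carries the primary text; each following entry becomes
+     * a term vector field, and every inner list is one token whose entries
+     * share a single position.
+     */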
+
+    @Override
+    public void setTextClass (String textClass) {
+	super.setTextClass(textClass);
+	this.addText("textClass", textClass);
+    };
+
+    @Override
+    public void setTitle (String title) {
+	super.setTitle(title);
+	this.addText("title", title);
+    };
+
+    @Override
+    public void setSubTitle (String subTitle) {
+	super.setSubTitle(subTitle);
+	this.addText("subTitle", subTitle);
+    };
+
+    @Override
+    public void setAuthor (String author) {
+	super.setAuthor(author);
+	this.addText("author", author);
+    };
+
+    @Override
+    public void setPubPlace (String pubPlace) {
+	super.setPubPlace(pubPlace);
+	this.addString("pubPlace", pubPlace);
+    };
+
+    @JsonProperty("pubDate")
+    @Override
+    public KorapDate setPubDate (String pubDate) {
+	KorapDate date = super.setPubDate(pubDate);
+	this.addInt("pubDate", date.toString());
+	return date;
+    };
+
+    @Override
+    public void setCorpusID (String corpusID) {
+	super.setCorpusID(corpusID);
+	this.addString("corpusID", corpusID);
+    };
+
+    @Override
+    public void setID (String ID) {
+	super.setID(ID);
+	this.addString("ID", ID);
+    };
+
+    public void setFoundries (String foundry) {
+	this.addText("foundries", foundry);
+    };
+
+    public void setTokenization (String tokenization) {
+	this.addString("tokenization", tokenization);
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/index/PositionsToOffset.java b/trunk/src/main/java/de/ids_mannheim/korap/index/PositionsToOffset.java
new file mode 100644
index 0000000..62eb516
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/index/PositionsToOffset.java
@@ -0,0 +1,213 @@
+package de.ids_mannheim.korap.index;
+
+import java.util.*;
+import java.io.*;
+
+import java.nio.ByteBuffer;
+
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class PositionsToOffset {
+    private String field;
+    private AtomicReaderContext atomic;
+    private boolean processed = false;
+    private Integer[] pair;
+    private static ByteBuffer bbOffset = ByteBuffer.allocate(8);
+
+    HashSet<PositionsToOffsetArray> positions;
+    HashMap<PositionsToOffsetArray, Integer[]> offsets;
+
+    private final static Logger log = LoggerFactory.getLogger(PositionsToOffset.class);
+
+    private class PositionsToOffsetArray {
+	public int docID;
+	public int pos;
+   
+	public PositionsToOffsetArray (int docID, int pos) {
+	    this.docID = docID;
+	    this.pos = pos;
+	};
+
+	public int hashCode(){
+	    long hashCode;
+	    hashCode = (docID * Integer.MAX_VALUE) - Integer.MAX_VALUE + pos;
+	    return new Long(hashCode).hashCode();
+	};
+     
+	public boolean equals(Object obj){
+	    if (obj instanceof PositionsToOffsetArray) {
+		PositionsToOffsetArray ptoa = (PositionsToOffsetArray) obj;
+		return (ptoa.docID == this.docID && ptoa.pos == this.pos);
+	    };
+	    return false;
+	};
+    };
+
+    public PositionsToOffset (AtomicReaderContext atomic, String field) {
+	this.field = field;
+	this.atomic = atomic;
+	this.positions = new HashSet<>(64);
+	this.offsets = new HashMap<>(64);
+    };
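+    /*
+     * Usage sketch (document ID, positions and field name are invented):
+     *
+     *   PositionsToOffset pto = new PositionsToOffset(atomic, "tokens");
+     *   pto.add(42, 3);
+     *   pto.add(42, 7);
+     *   int startChar = pto.start(42, 3);
+     *   int endChar   = pto.end(42, 3);
+     *
+     * Offsets are resolved lazily: the first call to start(), end(), span()
+     * or offsets() looks up all collected positions in one pass, using the
+     * special terms "_<position>" and their 8-byte offset payloads.
+     */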
+
+    public void clear () {
+	this.positions.clear();
+	this.offsets.clear();
+	this.bbOffset.clear();
+	this.processed = false;
+    };
+
+    public void add (int docID, int pos) {
+	this.add(new PositionsToOffsetArray(docID, pos));
+    };
+
+    public void add (PositionsToOffsetArray ptoa) {
+	this.positions.add(ptoa);
+	this.processed = false;
+    };
+
+    public boolean exists (int docID, int pos) {
+	return this.offsets.containsKey(new PositionsToOffsetArray(docID, pos));
+    };
+
+    public boolean exists (PositionsToOffsetArray ptoa) {
+	return this.offsets.containsKey(ptoa);
+    };
+
+    public int start (int docID, int pos) {
+	return this.start(new PositionsToOffsetArray(docID, pos));
+    };
+
+    public int start (PositionsToOffsetArray ptoa) {
+	if (ptoa.pos < 0)
+	    return 0;
+
+	if (!processed)
+	    this.offsets();
+
+	Integer[] pair = this.offsets.get(ptoa);
+
+	if (pair == null)
+	    return 0;
+
+	return pair[0];
+    };
+
+    public int end (int docID, int pos) {
+	return this.end(new PositionsToOffsetArray(docID, pos));
+    };
+
+    public int end (PositionsToOffsetArray ptoa) {
+	if (ptoa.pos < 0)
+	    return -1;
+
+	if (!processed)
+	    this.offsets();
+
+	Integer[] pair = this.offsets.get(ptoa);
+	if (pair == null)
+	    return -1;
+	return  pair[1];
+    };
+
+    public Integer[] span (int docID, int pos) {
+	return this.span(new PositionsToOffsetArray(docID, pos));
+    };
+
+    public Integer[] span (PositionsToOffsetArray ptoa) {
+	if (!processed)
+	    this.offsets();
+	return this.offsets.get(ptoa);
+    };
+
+
+    public HashMap<PositionsToOffsetArray, Integer[]> offsets () {
+	if (processed)
+	    return offsets;
+
+	StringBuilder sb = new StringBuilder().append('_');
+
+	try {
+	    Terms terms = atomic.reader().fields().terms(field);
+
+	    if (terms != null) {
+		// Todo: Maybe reuse a termsEnum!
+
+		final TermsEnum termsEnum = terms.iterator(null);
+
+		for (PositionsToOffsetArray posDoc : positions) {
+		    if (this.exists(posDoc))
+			continue;
+
+		    int docID = posDoc.docID;
+
+		    /*
+		    int pos = posDoc[1];
+		    Integer[] posDoc2 = new Integer[2];
+		    posDoc2[0] = docID;
+		    posDoc2[1] = pos;
+		    */
+
+		    sb.append(posDoc.pos);
+
+		    Term term = new Term(field, sb.toString());
+		    sb.setLength(1);
+
+		    if (termsEnum.seekExact(term.bytes(), true)) {
+			
+			log.trace("Search for {} in doc {} with pos {}", term.toString(), posDoc.docID, posDoc.pos);
+
+			// Start an iterator to fetch all payloads of the term
+			DocsAndPositionsEnum docs = termsEnum.docsAndPositions(
+                            null,
+			    null,
+			    DocsAndPositionsEnum.FLAG_PAYLOADS
+		        );
+
+			if (docs.advance(docID) == docID) {
+			    docs.nextPosition();
+
+			    BytesRef payload = docs.getPayload();
+
+			    if (payload.length == 8) {
+				bbOffset.clear();
+				bbOffset.put(payload.bytes, payload.offset, 8);
+				bbOffset.rewind();
+				Integer[] offsetArray = new Integer[2];
+				offsetArray[0] = bbOffset.getInt();
+				offsetArray[1] = bbOffset.getInt();
+				offsets.put(posDoc, offsetArray);
+
+				log.trace("Found {}-{} for {}", offsetArray[0], offsetArray[1], term.toString());
+			    }
+
+			    else {
+				log.error(
+				    "Doc {} has no offsets stored for {}",
+				    docID,
+				    term.toString()
+				);
+			    };
+			};
+		    };
+		};
+	    };
+	}
+	catch (IOException e) {
+	    // log.warn(e.getLocalizedMessage());
+	};
+
+	processed = true;
+	positions.clear();
+	return offsets;
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/SpanClassQuery.java b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanClassQuery.java
new file mode 100644
index 0000000..5ce4e70
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanClassQuery.java
@@ -0,0 +1,128 @@
+package de.ids_mannheim.korap.query;
+
+import java.io.IOException;
+
+import java.util.Set;
+import java.util.Map;
+
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+
+import de.ids_mannheim.korap.query.spans.ClassSpans;
+
+
+/**
+ * Marks spans with a special class payload.
+ */
+public class SpanClassQuery extends SpanQuery {
+    public String field;
+    protected byte number;
+    protected SpanQuery highlight;
+
+    public SpanClassQuery (SpanQuery highlight, byte number) {
+	this.field = highlight.getField();
+	this.highlight = highlight;
+	this.number = number;
+    };
+
+    public SpanClassQuery (SpanQuery highlight) {
+	this.field = highlight.getField();
+	this.highlight = highlight;
+	this.number = (byte) 0;
+    };
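+    /*
+     * Wrapping sketch (field and term are invented):
+     *
+     *   SpanQuery noun = new SpanTermQuery(new Term("tokens", "p:NN"));
+     *   SpanQuery cls  = new SpanClassQuery(noun, (byte) 1);
+     *   // cls.toString() -> "{1: tokens:p:NN}"
+     *
+     * Matches are returned unchanged, but each one carries an additional
+     * 9-byte payload (start, end, class number) added by ClassSpans.
+     */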
+
+    public byte number () {
+	return this.number;
+    };
+
+    @Override
+    public String getField () { return field; }
+
+    @Override
+    public void extractTerms (Set<Term> terms) {
+	this.highlight.extractTerms(terms);
+    };
+
+    @Override
+    public String toString (String field) {
+	StringBuffer buffer = new StringBuffer("{");
+	buffer.append((int) this.number).append(": ");
+        buffer.append(this.highlight.toString()).append('}');
+	buffer.append(ToStringUtils.boost(getBoost()));
+	return buffer.toString();
+    };
+
+    @Override
+    public Spans getSpans (final AtomicReaderContext context,
+			   Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
+	return (Spans) new ClassSpans(
+	    this.highlight,
+	    context,
+	    acceptDocs,
+	    termContexts,
+	    number
+        );
+    };
+
+    @Override
+    public Query rewrite (IndexReader reader) throws IOException {
+	SpanClassQuery clone = null;
+	SpanQuery query = (SpanQuery) this.highlight.rewrite(reader);
+
+	if (query != this.highlight) {
+	    if (clone == null)
+		clone = this.clone();
+	    clone.highlight = query;
+	};
+
+	if (clone != null)
+	    return clone;
+
+	return this;
+    };
+
+    @Override
+    public SpanClassQuery clone() {
+	SpanClassQuery spanClassQuery = new SpanClassQuery(
+	    (SpanQuery) this.highlight.clone(),
+	    this.number
+        );
+	spanClassQuery.setBoost(getBoost());
+	return spanClassQuery;
+    };
+
+
+    /** Returns true iff <code>o</code> is equal to this. */
+    @Override
+    public boolean equals(Object o) {
+	if (this == o) return true;
+	if (!(o instanceof SpanClassQuery)) return false;
+	
+	final SpanClassQuery spanClassQuery = (SpanClassQuery) o;
+	
+	if (!highlight.equals(spanClassQuery.highlight)) return false;
+
+	if (this.number != spanClassQuery.number) return false;
+
+	return getBoost() == spanClassQuery.getBoost();
+    };
+
+
+    // Ad hoc hash combination of the query components and the boost
+    @Override
+    public int hashCode() {
+	int result = 1;
+	result = highlight.hashCode();
+	result += (int) number;
+	result ^= (result << 15) | (result >>> 18);
+	result += Float.floatToRawIntBits(getBoost());
+	return result;
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/SpanElementQuery.java b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanElementQuery.java
new file mode 100644
index 0000000..86c5d56
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanElementQuery.java
@@ -0,0 +1,128 @@
+package de.ids_mannheim.korap.query;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.index.TermState;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+import org.apache.lucene.search.spans.Spans;
+
+import de.ids_mannheim.korap.query.spans.ElementSpans;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+
+/** Matches spans wrapped by an element. */
+public class SpanElementQuery extends SpanQuery {
+    protected Term element;
+    private String elementStr;
+    private String field;
+    
+    /** Constructor. */
+    public SpanElementQuery(String field, String term) {
+	StringBuilder sb = new StringBuilder("<>:");
+	this.field = field;
+	this.elementStr = term;
+	this.element = new Term(field, sb.append(term).toString());
+    };
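+    /*
+     * Elements are indexed as terms with a "<>:" prefix, so (with an
+     * invented field name)
+     *
+     *   new SpanElementQuery("tokens", "s")
+     *
+     * looks up the term "<>:s" in the field "tokens" and iterates over the
+     * spans stored for it.
+     */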
+
+    /** Return the element whose spans are matched. */
+    public Term getElement() { return element; };
+
+    @Override
+    public String getField() { return element.field(); };
+  
+    @Override
+    public void extractTerms(Set<Term> terms) {
+	terms.add(element);
+    };
+
+    @Override
+    public String toString(String field) {
+	StringBuilder buffer = new StringBuilder("<");
+	buffer.append(this.field).append(':').append(elementStr);
+	buffer.append(ToStringUtils.boost(getBoost()));
+	return buffer.append(" />").toString();
+    };
+
+    @Override
+    public int hashCode() {
+	final int prime = 37; // Instead of 31
+	int result = super.hashCode();
+	result = prime * result + ((element == null) ? 0 : element.hashCode());
+	return result;
+    };
+
+    @Override
+    public boolean equals(Object obj) {
+	if (this == obj)
+	    return true;
+	if (!super.equals(obj))
+	    return false;
+	if (getClass() != obj.getClass())
+	    return false;
+	SpanElementQuery other = (SpanElementQuery) obj;
+	if (element == null) {
+	    if (other.element != null)
+		return false;
+	} else if (!element.equals(other.element))
+	    return false;
+	return true;
+    };
+
+    @Override
+    public Spans getSpans(final AtomicReaderContext context,
+			  Bits acceptDocs,
+			  Map<Term,TermContext> termContexts) throws IOException {
+	TermContext termContext = termContexts.get(element);
+	final TermState state;
+	if (termContext == null) {
+	    // This happens with span-not queries, as they don't include
+	    // the NOT side in extractTerms(), so we seek to the term
+	    // in this segment now. Not pretty, but it works.
+	    final Fields fields = context.reader().fields();
+	    if (fields != null) {
+		final Terms terms = fields.terms(element.field());
+		if (terms != null) {
+		    final TermsEnum termsEnum = terms.iterator(null);
+		    if (termsEnum.seekExact(element.bytes(), true)) { 
+			state = termsEnum.termState();
+		    } else {
+			state = null;
+		    }
+		} else {
+		    state = null;
+		}
+	    } else {
+		state = null;
+	    }
+	} else {
+	    state = termContext.get(context.ord);
+	};
+    
+	if (state == null) { // term is not present in that reader
+	    return ElementSpans.EMPTY_ELEMENT_SPANS;
+	};
+    
+	final TermsEnum termsEnum = context.reader().terms(element.field()).iterator(null);
+	termsEnum.seekExact(element.bytes(), state);
+    
+	final DocsAndPositionsEnum postings = termsEnum.docsAndPositions(acceptDocs, null, DocsAndPositionsEnum.FLAG_PAYLOADS);
+
+	if (postings != null) {
+	    return new ElementSpans(postings, element);
+	};
+
+	// element does exist, but has no positions
+	throw new IllegalStateException("field \"" + element.field() + "\" was indexed without position data; cannot run SpanElementQuery (element=" + element.text() + ")");
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/SpanMatchModifyQuery.java b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanMatchModifyQuery.java
new file mode 100644
index 0000000..d2b7a0b
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanMatchModifyQuery.java
@@ -0,0 +1,121 @@
+package de.ids_mannheim.korap.query;
+
+import java.io.IOException;
+
+import java.util.Set;
+import java.util.Map;
+
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+
+import de.ids_mannheim.korap.query.spans.MatchModifySpans;
+import de.ids_mannheim.korap.query.SpanClassQuery;
+
+/**
+ * Shrinks a match to the span marked with a given class, or splits the match at that span.
+ */
+public class SpanMatchModifyQuery extends SpanClassQuery {
+    private boolean divide = false;
+
+    public SpanMatchModifyQuery (SpanQuery highlight, byte number, boolean divide) {
+	super(highlight, number);
+	this.divide = divide;
+    };
+
+    public SpanMatchModifyQuery (SpanQuery highlight, boolean divide) {
+	this(highlight, (byte) 0, divide);
+    };
+
+    public SpanMatchModifyQuery (SpanQuery highlight, byte number) {
+	this(highlight, number, false);
+    };
+
+    public SpanMatchModifyQuery (SpanQuery highlight) {
+	this(highlight, (byte) 0, false);
+    };
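+    /*
+     * Combination sketch (field and term are invented):
+     *
+     *   SpanQuery term   = new SpanTermQuery(new Term("tokens", "s:Baum"));
+     *   SpanQuery marked = new SpanClassQuery(term, (byte) 1);
+     *   SpanQuery shrink = new SpanMatchModifyQuery(marked, (byte) 1);        // "shrink(1: ...)"
+     *   SpanQuery split  = new SpanMatchModifyQuery(marked, (byte) 1, true);  // "split(1: ...)"
+     *
+     * With divide == false the match is shrunk to the spans of class 1,
+     * with divide == true it is divided at them (see MatchModifySpans).
+     */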
+
+    @Override
+    public String toString (String field) {
+	StringBuffer buffer = new StringBuffer();
+	if (divide) {
+	    buffer.append("split(");
+	}
+	else {
+	    buffer.append("shrink(");
+	};
+	buffer.append((int) this.number).append(": ");
+        buffer.append(this.highlight.toString());
+	buffer.append(')');
+
+	buffer.append(ToStringUtils.boost(getBoost()));
+	return buffer.toString();
+    };
+
+    @Override
+    public Spans getSpans (final AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
+	return (Spans) new MatchModifySpans(this.highlight, context, acceptDocs, termContexts, number, divide);
+    };
+
+    @Override
+    public Query rewrite (IndexReader reader) throws IOException {
+	SpanMatchModifyQuery clone = null;
+	SpanQuery query = (SpanQuery) this.highlight.rewrite(reader);
+
+	if (query != this.highlight) {
+	    if (clone == null)
+		clone = this.clone();
+	    clone.highlight = query;
+	};
+
+	if (clone != null)
+	    return clone;
+
+	return this;
+    };
+
+    @Override
+    public SpanMatchModifyQuery clone() {
+	SpanMatchModifyQuery spanMatchQuery = new SpanMatchModifyQuery(
+	    (SpanQuery) this.highlight.clone(),
+	    this.number,
+	    this.divide
+        );
+	spanMatchQuery.setBoost(getBoost());
+	return spanMatchQuery;
+    };
+
+
+    /** Returns true iff <code>o</code> is equal to this. */
+    @Override
+    public boolean equals(Object o) {
+	if (this == o) return true;
+	if (!(o instanceof SpanMatchModifyQuery)) return false;
+	
+	final SpanMatchModifyQuery spanMatchModifyQuery = (SpanMatchModifyQuery) o;
+	
+	if (!highlight.equals(spanMatchModifyQuery.highlight)) return false;
+	if (this.number != spanMatchModifyQuery.number) return false;
+	if (this.divide != spanMatchModifyQuery.divide) return false;
+	return getBoost() == spanMatchModifyQuery.getBoost();
+    };
+
+
+    // Ad hoc hash combination of the query components and the boost
+    @Override
+    public int hashCode() {
+	int result = 1;
+	result = highlight.hashCode();
+	result += number + 33_333;
+	result += divide ? 1 : 0;
+	result ^= (result << 15) | (result >>> 18);
+	result += Float.floatToRawIntBits(getBoost());
+	return result;
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/SpanNextQuery.java b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanNextQuery.java
new file mode 100644
index 0000000..370be81
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanNextQuery.java
@@ -0,0 +1,172 @@
+package de.ids_mannheim.korap.query;
+
+// Based on SpanNearQuery
+
+/*
+  Todo: Make one Spanarray and switch between the results of A and B.
+*/
+
+import java.io.IOException;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.spans.Spans;
+
+import de.ids_mannheim.korap.query.spans.NextSpans;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Matches spans that directly follow each other.
+ */
+public class SpanNextQuery extends SpanQuery implements Cloneable {
+    private SpanQuery firstClause;
+    private SpanQuery secondClause;
+    public String field;
+    private boolean collectPayloads;
+
+    // Logger
+    private final static Logger log = LoggerFactory.getLogger(SpanNextQuery.class);
+
+    // Constructor
+    public SpanNextQuery(SpanQuery firstClause,
+			 SpanQuery secondClause) {
+	this(firstClause, secondClause, true);
+    };
+
+    // Constructor  
+    public SpanNextQuery(SpanQuery firstClause,
+			 SpanQuery secondClause,
+			 boolean collectPayloads) {
+
+	this.field = secondClause.getField();
+	if (!firstClause.getField().equals(field)) {
+	    throw new IllegalArgumentException("Clauses must have same field.");
+	};
+
+	this.collectPayloads = collectPayloads;
+	this.firstClause = firstClause;
+	this.secondClause = secondClause;
+    };
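+    /*
+     * Adjacency sketch (field and surface forms are invented):
+     *
+     *   SpanQuery der  = new SpanTermQuery(new Term("tokens", "s:der"));
+     *   SpanQuery baum = new SpanTermQuery(new Term("tokens", "s:Baum"));
+     *   SpanQuery seq  = new SpanNextQuery(der, baum);
+     *   // seq.toString() -> "spanNext(tokens:s:der, tokens:s:Baum)"
+     *
+     * A match requires the first span to end exactly where the second span
+     * starts (see NextSpans).
+     */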
+
+
+    @Override
+    public String getField() { return field; }
+
+    public SpanQuery firstClause() { return firstClause; };
+
+    public SpanQuery secondClause() { return secondClause; };
+  
+    @Override
+    public void extractTerms(Set<Term> terms) {
+	firstClause.extractTerms(terms);
+	secondClause.extractTerms(terms);
+    };
+  
+
+    @Override
+    public String toString(String field) {
+	StringBuilder buffer = new StringBuilder();
+	buffer.append("spanNext(");
+	buffer.append(firstClause.toString(field));
+        buffer.append(", ");
+	buffer.append(secondClause.toString(field));
+	buffer.append(")");
+	buffer.append(ToStringUtils.boost(getBoost()));
+	return buffer.toString();
+    };
+
+    @Override
+    public Spans getSpans (final AtomicReaderContext context,
+			   Bits acceptDocs,
+			   Map<Term,TermContext> termContexts) throws IOException {
+
+	log.trace("Get Spans");
+	return (Spans) new NextSpans (
+            this, context, acceptDocs, termContexts, collectPayloads
+	);
+    };
+
+    @Override
+    public Query rewrite (IndexReader reader) throws IOException {
+	// System.err.println(">> Rewrite query");
+
+	SpanNextQuery clone = null;
+
+	// System.err.println(">> Rewrite first clause");
+	SpanQuery query = (SpanQuery) firstClause.rewrite(reader);
+
+	if (query != firstClause) {
+	    if (clone == null)
+		clone = this.clone();
+	    clone.firstClause = query;
+	};
+
+	// System.err.println(">> Rewrite second clause");
+	query = (SpanQuery) secondClause.rewrite(reader);
+	if (query != secondClause) {
+	    if (clone == null)
+		clone = this.clone();
+	    clone.secondClause = query;
+	};
+
+	if (clone != null) {
+	    // System.err.println(">> Clone is not null");
+	    return clone;
+	};
+
+	return this;
+    };
+  
+
+    @Override
+    public SpanNextQuery clone() {
+	SpanNextQuery spanNextQuery = new SpanNextQuery(
+	    (SpanQuery) firstClause.clone(),
+	    (SpanQuery) secondClause.clone(),
+	    this.collectPayloads
+        );
+	spanNextQuery.setBoost(getBoost());
+	return spanNextQuery;
+    };
+
+
+    /** Returns true iff <code>o</code> is equal to this. */
+    @Override
+    public boolean equals(Object o) {
+	if (this == o) return true;
+	if (!(o instanceof SpanNextQuery)) return false;
+	
+	final SpanNextQuery spanNextQuery = (SpanNextQuery) o;
+	
+	if (collectPayloads != spanNextQuery.collectPayloads) return false;
+	if (!firstClause.equals(spanNextQuery.firstClause)) return false;
+	if (!secondClause.equals(spanNextQuery.secondClause)) return false;
+
+	return getBoost() == spanNextQuery.getBoost();
+    };
+
+
+    // Ad hoc hash combination of the query components and the boost
+    @Override
+    public int hashCode() {
+	int result;
+	result = firstClause.hashCode() + secondClause.hashCode();
+	result ^= (result << 31) | (result >>> 2);  // reversible
+	result += Float.floatToRawIntBits(getBoost());
+	return result;
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/SpanWithinQuery.java b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanWithinQuery.java
new file mode 100644
index 0000000..8e9235d
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/SpanWithinQuery.java
@@ -0,0 +1,196 @@
+package de.ids_mannheim.korap.query;
+
+// Based on SpanNearQuery
+import java.io.IOException;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.SpanTermQuery;
+
+import de.ids_mannheim.korap.query.spans.WithinSpans;
+import de.ids_mannheim.korap.query.SpanElementQuery;
+
+/**
+ * Matches spans that are within certain elements.
+ */
+public class SpanWithinQuery extends SpanQuery implements Cloneable {
+    private SpanQuery wrap;
+    private SpanQuery embedded;
+    public String field;
+    private short flag;
+    private boolean collectPayloads;
+
+    public static final short
+	WITHIN = 0,
+	STARTSWITH = 1,
+	ENDSWITH = 2,
+	MATCH = 3;
+
+    // may support "starting" and "ending"
+
+    // Constructor
+    public SpanWithinQuery (SpanQuery wrap,
+			    SpanQuery embedded,
+			    short flag,
+			    boolean collectPayloads) {
+
+	this.field = embedded.getField();
+	this.embedded = embedded;
+	this.wrap = wrap;
+	this.flag = flag;
+	this.collectPayloads = collectPayloads;
+    };
+
+    // Constructor
+    public SpanWithinQuery(String element,
+			   SpanQuery embedded) {
+	this(element, embedded, (short) 0, true);
+    };
+
+    // Constructor
+    public SpanWithinQuery (String element,
+			    SpanQuery embedded,
+			    short flag,
+			    boolean collectPayloads) {
+	this(
+	     (SpanQuery) new SpanElementQuery(embedded.getField(), element),
+	     embedded,
+	     flag,
+	     collectPayloads
+	);
+    };
+
+
+    // Constructor
+    public SpanWithinQuery(String element,
+			   SpanQuery embedded,
+			   short flag) {
+	this(element, embedded, flag, true);
+    };
+
+    // Constructor
+    public SpanWithinQuery (String element,
+			    SpanQuery embedded,
+			    boolean collectPayloads) {
+	this(element, embedded, (short) 0, collectPayloads);
+    };
+
+
+    // Constructor
+    public SpanWithinQuery(SpanQuery wrap,
+			   SpanQuery embedded,
+			   short flag) {
+	this(wrap, embedded, flag, true);
+    };
+
+    // Constructor
+    public SpanWithinQuery(SpanQuery wrap,
+			   SpanQuery embedded) {
+	this(wrap, embedded, (short) 0, true);
+    };
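+    /*
+     * Usage sketch (element name, field and term are invented):
+     *
+     *   SpanQuery noun    = new SpanTermQuery(new Term("tokens", "p:NN"));
+     *   SpanQuery inS     = new SpanWithinQuery("s", noun);
+     *   SpanQuery atStart = new SpanWithinQuery("s", noun, SpanWithinQuery.STARTSWITH);
+     *
+     * The string constructors wrap the element name in a SpanElementQuery on
+     * the embedded query's field; the flag selects WITHIN, STARTSWITH,
+     * ENDSWITH or MATCH behaviour (evaluated in WithinSpans).
+     */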
+
+
+    @Override
+    public String getField()    { return field;    };
+    public SpanQuery wrap()     { return wrap;     };
+    public SpanQuery embedded() { return embedded; };
+    public short flag()         { return flag; };
+  
+    @Override
+    public void extractTerms(Set<Term> terms) {
+	embedded.extractTerms(terms);
+    };
+  
+    @Override
+    public String toString(String field) {
+	StringBuilder buffer = new StringBuilder();
+	buffer.append("spanWithin(");
+        buffer.append(wrap.toString());
+        buffer.append(", ");
+	buffer.append(embedded.toString(field));
+        buffer.append(")");
+	buffer.append(ToStringUtils.boost(getBoost()));
+	return buffer.toString();
+    };
+
+    @Override
+    public Spans getSpans (final AtomicReaderContext context,
+			   Bits acceptDocs,
+			   Map<Term,TermContext> termContexts) throws IOException {
+	return (Spans) new WithinSpans (
+            this, context, acceptDocs, termContexts, this.flag
+	);
+    };
+
+    @Override
+    public Query rewrite (IndexReader reader) throws IOException {
+	SpanWithinQuery clone = null;
+
+	SpanQuery query = (SpanQuery) embedded.rewrite(reader);
+
+	if (query != embedded) {
+	    if (clone == null)
+		clone = this.clone();
+	    clone.embedded = query;
+	};
+
+	if (clone != null)
+	    return clone;
+
+	return this;
+    };
+  
+
+    @Override
+    public SpanWithinQuery clone () {
+	SpanWithinQuery spanWithinQuery = new SpanWithinQuery(
+            (SpanQuery) wrap.clone(),
+	    (SpanQuery) embedded.clone(),
+	    this.flag,
+	    this.collectPayloads
+        );
+	spanWithinQuery.setBoost(getBoost());
+	return spanWithinQuery;
+    };
+
+
+    /** Returns true iff <code>o</code> is equal to this. */
+    @Override
+    public boolean equals(Object o) {
+	if (this == o) return true;
+	if (!(o instanceof SpanWithinQuery)) return false;
+	
+	final SpanWithinQuery spanWithinQuery = (SpanWithinQuery) o;
+	
+	if (collectPayloads != spanWithinQuery.collectPayloads) return false;
+	if (!wrap.equals(spanWithinQuery.wrap)) return false;
+	if (!embedded.equals(spanWithinQuery.embedded)) return false;
+
+	return getBoost() == spanWithinQuery.getBoost();
+    };
+
+    // Ad hoc hash combination of the query components and the boost
+    @Override
+    public int hashCode() {
+	int result = 1;
+	result = embedded.hashCode();
+	result += wrap.hashCode();
+	result ^= (result << 4) | (result >>> 29);
+	result += Float.floatToRawIntBits(getBoost());
+	return result;
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/spans/ClassSpans.java b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/ClassSpans.java
new file mode 100644
index 0000000..e52ee73
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/ClassSpans.java
@@ -0,0 +1,115 @@
+package de.ids_mannheim.korap.query.spans;
+
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+
+import java.util.Map;
+import java.util.ArrayList;
+import java.util.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+
+public class ClassSpans extends Spans {
+    private List<byte[]> highlightedPayload;
+    private Collection<byte[]> payload;
+    private final Spans spans;
+    private byte number;
+    private ByteBuffer bb;
+    private SpanQuery highlight;
+    private final Logger log = LoggerFactory.getLogger(ClassSpans.class);
+
+    public ClassSpans (SpanQuery highlight, AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts, byte number) throws IOException {
+	spans = highlight.getSpans(context, acceptDocs, termContexts);
+	this.number = number;
+	this.highlight = highlight;
+	this.highlightedPayload = new ArrayList<byte[]>(6);
+    };
+
+    @Override
+    public Collection<byte[]> getPayload() throws IOException {
+	/*
+	for (byte[] x: highlightedPayload) {
+	    ByteBuffer b = ByteBuffer.wrap(x, 0, x.length);
+	    log.trace(">> Get Payload: {}-{} in class {}", b.getInt(), b.getInt(), b.get());
+	};
+	*/
+	return highlightedPayload;
+    };
+
+    @Override
+    public boolean isPayloadAvailable() {
+	// return highlightedPayload.isEmpty() == false;
+	return true;
+    };
+
+    public int doc() { return spans.doc(); }
+
+    // inherit javadocs
+    @Override
+    public int start() { return spans.start(); }
+
+    // inherit javadocs
+    @Override
+    public int end() { return spans.end(); }
+
+
+    // inherit javadocs
+    @Override
+    public boolean next() throws IOException {
+	log.trace("Forward next");
+
+	if (spans.next()) {
+
+	    highlightedPayload.clear();
+
+	    if (spans.isPayloadAvailable()) {
+		highlightedPayload.addAll(spans.getPayload());
+		log.trace("Found payload");
+	    };
+
+
+	    log.trace("Start to create class {} with span {} - {}",
+		      number,
+		      spans.start(),
+		      spans.end());
+
+	    // Todo: Better allocate using a Factory!
+
+	    bb = ByteBuffer.allocate(9);
+
+	    bb.putInt(spans.start()).putInt(spans.end()).put(number);
+	    // Add highlight information as byte after offsets
+	    highlightedPayload.add(bb.array());
+	    return true;
+	};
+	return false;
+    };
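+    /*
+     * Payload entry added per match (9 bytes, big-endian):
+     *
+     *   bytes 0-3  start position
+     *   bytes 4-7  end position
+     *   byte  8    class number
+     */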
+
+    // inherit javadocs
+    @Override
+    public boolean skipTo(int target) throws IOException {
+	return spans.skipTo(target);
+    };
+
+    @Override
+    public String toString() {
+	return getClass().getName() + "(" + this.highlight.toString() + ")@" +
+	    (doc() + ":" + start() + "-" + end());
+    };
+
+
+    @Override
+    public long cost() {
+	return spans.cost();
+    }
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/spans/ElementSpans.java b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/ElementSpans.java
new file mode 100644
index 0000000..902a33b
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/ElementSpans.java
@@ -0,0 +1,505 @@
+package de.ids_mannheim.korap.query.spans;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.util.BytesRef;
+
+import java.nio.ByteBuffer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.ArrayList;
+import java.util.List;
+
+// TODO: Store payloads in 12 bytes instead of the complicated ByteBuffer stuff!
+
+import de.ids_mannheim.korap.query.spans.KorapTermSpan;
+
+public class ElementSpans extends Spans {
+    private byte[] payloadByte = new byte[4];
+    private ByteBuffer bb = ByteBuffer.allocate(4);
+
+    protected final DocsAndPositionsEnum postings;
+    protected final Term term;
+    private int doc, freq, count, position, end;
+    protected boolean readPayload;
+
+    private  LinkedList<KorapTermSpan> memory;
+    private ByteBuffer storedPayload = ByteBuffer.allocate(128);
+    boolean hasStoredPayload = false;
+
+    private KorapTermSpan overflow, tempSpan;
+
+    private final static Logger log = LoggerFactory.getLogger(ElementSpans.class);
+
+    public ElementSpans(DocsAndPositionsEnum postings, Term term) {
+	this.postings = postings;
+	this.term = term;
+	this.doc = -1;
+	this.end = -1;
+	storedPayload.clear();
+	hasStoredPayload = false;
+	// storedPayload = null;
+	memory = new LinkedList<KorapTermSpan>();
+	overflow = new KorapTermSpan();
+	tempSpan = new KorapTermSpan();
+    };
+
+    // only for EmptyElementSpans (below)
+    ElementSpans() {
+	term = null;
+	postings = null;
+    };
+
+    @Override
+    public boolean next() throws IOException {
+	end = -1;
+
+	if (memory.size() > 0) {
+	    log.trace("There is a memory entry");
+
+	    _setToCurrent(memory.removeFirst());
+
+	    log.trace("Current1: [{}-{}]", position, end);
+
+	    return true;
+	};
+
+	log.trace("There is no memory entry");
+
+	if (count == freq) {
+	    log.trace("last position in document");
+
+	    // Check for overflow on document boundary
+	    if (overflow.start != -1) {
+		log.trace("  but there is an overflow");
+
+		_setToCurrent(overflow).clear();
+
+		log.trace("Current2: [{}-{}]", position, end);
+
+		return true;
+	    };
+
+	    if (postings == null) {
+		log.trace("no more postings");
+		return false;
+	    };
+
+	    log.trace("Go to next doc");
+
+	    doc = postings.nextDoc();
+
+	    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
+		log.trace("no more docs");
+		return false;
+	    };
+
+	    // New doc!
+	    end = -1;
+	    storedPayload.clear();
+	    hasStoredPayload = false;
+
+	    freq = postings.freq();
+	    count = 0;
+	};
+
+	int pos = overflow.start;
+	
+	while (true) {
+	    /*
+	    if (DEBUG)
+		System.err.println(">> Reset end and payload");
+	    storedPayload.clear();
+	    end = -1;
+	    */
+
+	    log.trace("pos is {}", pos);
+
+	    _log_payloads(1);
+
+	    if (count == freq) {
+		log.trace("last position in document");
+
+		if (postings == null) {
+
+		    log.trace("no more postings");
+
+		    // Check for overflow on document boundary
+		    if (overflow.start != -1) {
+			log.trace("  but there is an overflow");
+
+			_setToCurrent(overflow).clear();
+			log.trace("Current3: [{}-{}]", position, end);
+
+			return true;
+		    };
+
+		    return false;
+		};
+
+		log.trace("go to next doc");
+		_log_payloads(2);
+
+		if (overflow.start != -1) {
+		    log.trace("Storing overflow {} ...", overflow.toString());
+		    log.trace("... in memory with {}-{}", overflow.startChar(), overflow.endChar());
+		    memory.add((KorapTermSpan) overflow.clone());
+		    overflow.clear();
+		};
+		_log_payloads(3);
+
+		if (memory.size() > 0) {
+		    log.trace("sort and return first");
+		    _log_payloads(4);
+		    Collections.sort(memory);
+		    _log_payloads(5);
+		    _setToCurrent(memory.removeFirst());
+		    _log_payloads(6);
+
+		    log.trace("Current4: [{}-{}]", position, end);
+		    break;
+		};
+
+		doc = postings.nextDoc();
+		// New doc
+		end = -1;
+		pos = -1;
+
+		if (doc == DocIdSetIterator.NO_MORE_DOCS) {
+		    log.trace("no more docs");
+		    return false;
+		};
+
+		freq = postings.freq();
+		count = 0;
+	    };
+
+
+	    log.trace("Forward postings");
+	    position = postings.nextPosition();
+	    // New pos!
+	    end = -1;
+	    _log_payloads(9);
+	    log.trace("CLEAR PAYLOAD");
+	    storedPayload.clear();
+	    hasStoredPayload = false;
+	    _log_payloads(10);
+
+	    count++;
+
+	    log.trace("next position is {}", position);
+
+	    // There was no overflow
+	    if (pos == -1 || pos == position) {
+		if (pos == position) {
+		    log.trace("Add overflow to memory");
+		    memory.add((KorapTermSpan) overflow.clone());
+		}
+
+		else {
+		    log.trace("There was no overflow");
+		    pos = position;
+		};
+
+		_log_payloads(8);
+		log.trace("*****************************");
+		_setCurrentTo(overflow);
+		log.trace("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
+
+		log.trace("Set overflow and continue: {} ...", overflow.toString());
+		log.trace("... with {}-{}", overflow.startChar(), overflow.endChar());
+		continue;
+	    }
+
+	    // overflow was older
+	    else if (pos != position) {
+
+		log.trace("Overflow was older");
+
+		// Use memory
+		if (memory.size() > 0) {
+
+		    log.trace("Add overflow to memory");
+
+		    memory.add((KorapTermSpan) overflow.clone());
+
+		    log.trace("Sort memory");
+
+		    // Sort by end position
+		    Collections.sort(memory);
+
+		    // Store current information in overflow
+		    _setCurrentTo(overflow);
+
+		    log.trace("Set new overflow: {}", overflow.toString());
+
+		    log.trace("Get first element from sorted memory");
+
+		    _setToCurrent(memory.removeFirst());
+		}
+
+		// Nothing in memory - use overflow!
+		else {
+
+		    log.trace("There is nothing in memory");
+
+		    /* Make overflow active and store last position in overflow */
+		    _setCurrentTo(tempSpan);
+
+		    log.trace("Temp is now {}", overflow.toString());
+
+		    _setToCurrent(overflow);
+		    
+		    // Store current information in overflow
+		    overflow.copyFrom(tempSpan);
+
+		    log.trace("Overflow is now {}", overflow.toString());
+
+		};
+		break;
+	    };
+	};
+	log.trace("Current4: [{}-{}]", position, end);
+
+	readPayload = false;
+	return true;
+    };
+
+    private KorapTermSpan _setToCurrent (KorapTermSpan act) {
+	if (act.payload != null) {
+	    act.payload.rewind();
+	    log.trace("Set to current with {}, meaning {} - {}",
+		      act.toString(), act.payload.getInt(0), act.payload.getInt(4));
+	};
+
+	position = act.start;
+	end = act.end;
+	storedPayload.clear();
+	hasStoredPayload = false;
+
+	if (act.payload != null) {
+	    log.trace("Payload is not null");
+	    act.payload.rewind();
+	    storedPayload.put(act.payload);
+	    hasStoredPayload = true;
+	}
+	else {
+	    log.trace("Payload is null");
+	};
+
+	return act;
+    };
+
+    private void _log_payloads (int nr) {
+	if (hasStoredPayload) {
+	    log.trace(
+		      "[{}] payload offsets are {}-{}",
+		      nr,
+		      storedPayload.getInt(0),
+		      storedPayload.getInt(4)
+		      );
+	}
+	else {
+	    log.trace("[{}] payload is empty", nr);
+	};
+    };
+
+    private void _setCurrentTo () {
+	overflow.start = position;
+	overflow.end = this.end();
+	overflow.payload.clear();
+	if (hasStoredPayload) {
+	    // Rewind before copying, as in _setCurrentTo(KorapTermSpan)
+	    storedPayload.rewind();
+	    overflow.payload.put(storedPayload);
+	};
+	log.trace("Set current to Overflow {} with {}-{}", overflow.toString(), overflow.startChar(), overflow.endChar());
+    };
+
+    private void _setCurrentTo (KorapTermSpan o) {
+	_log_payloads(7);
+	o.start = position;
+	o.end = this.end();
+	o.payload.clear();
+	if (hasStoredPayload) {
+	    storedPayload.rewind();
+	    o.payload.put(storedPayload);
+	    log.trace("Object now has offset {}-{}", o.payload.getInt(0), o.payload.getInt(4));
+
+	    // Important:
+	    o.payload.rewind();
+	};
+	log.trace("Set current to object {} ...", o.toString());
+	if (hasStoredPayload) {
+	    log.trace("with {}-{} from {}-{}", o.startChar(), o.endChar(), storedPayload.getInt(0), storedPayload.getInt(4));
+	    storedPayload.rewind();
+	};
+    };
+
+
+    @Override
+    public boolean skipTo(int target) throws IOException {
+	assert target > doc;
+	doc = postings.advance(target);
+
+	end = -1;
+	overflow.clear();
+	storedPayload.clear();
+	hasStoredPayload = false;
+	if (memory != null)
+	    memory.clear();
+
+	if (doc == DocIdSetIterator.NO_MORE_DOCS) {
+	    return false;
+	};
+
+	freq = postings.freq();
+	count = 0;
+	position = postings.nextPosition();
+	count++;
+	readPayload = false;
+	return true;
+    };
+
+    @Override
+    public int doc() {
+	return doc;
+    };
+
+    @Override
+    public int start() {
+	return position;
+    };
+
+    @Override
+    public int end() {
+	if (end >= 0)
+	    return end;
+
+	try {
+	    end = this.getPayloadEndPosition();
+	}
+	catch (Exception e) {
+	    end = position;
+	};
+	return end;
+    };
+
+    @Override
+    public long cost() {
+	return postings.cost();
+    };
+
+    @Override
+    public Collection<byte[]> getPayload() throws IOException {
+	byte[] offsetCharacters = new byte[8];
+
+	if (storedPayload.position() <= 0)
+	    this.getPayloadEndPosition();
+
+	if (hasStoredPayload) {
+	    log.trace("storedPayload: {} - {}", storedPayload.getInt(0), storedPayload.getInt(4));
+	}
+	else {
+	    log.trace("storedPayload is empty");
+	};
+	System.arraycopy(storedPayload.array(), 0, offsetCharacters, 0, 8);
+
+	//	return Collections.singletonList(storedPayload.array());
+	return Collections.singletonList(offsetCharacters);
+    };
+
+    @Override
+    public boolean isPayloadAvailable() throws IOException {
+	return readPayload == false && postings.getPayload() != null;
+    };
+
+    @Override
+    public String toString() {
+	return "spans(" + term.toString() + ")@" +
+            (doc == -1 ? "START" : (doc == Integer.MAX_VALUE) ? "END" : doc + "-" + position);
+    };
+
+    public DocsAndPositionsEnum getPostings() {
+	return postings;
+    };
+
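+    /*
+     * Assumed element payload layout, derived from the reads below:
+     *   bytes 0-3  : start character offset
+     *   bytes 4-7  : end character offset
+     *   bytes 8-11 : end token position
+     *   bytes 12-  : remaining payload data
+     */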
+    private int getPayloadEndPosition () {
+	log.trace("getPayloadEndPosition of element ...");
+
+	try {
+	    BytesRef payload = postings.getPayload();
+
+	    log.trace("  BytesRef: {}", payload);
+	    readPayload = true;
+	    storedPayload.clear();
+	    hasStoredPayload = false;
+	    if (payload != null) {
+		log.trace("Do bit magic");
+		storedPayload.put(payload.bytes, payload.offset, 8);
+		storedPayload.put(payload.bytes, payload.offset + 12, payload.length - 12);
+		System.arraycopy(payload.bytes, payload.offset + 8, payloadByte, 0, 4);
+		hasStoredPayload = true;
+
+		log.trace("~~ Bytes: {}-{}-{}",
+			  storedPayload.getInt(0),
+			  storedPayload.getInt(4),
+			  payloadByte);
+	    }
+
+	    else {
+		log.trace("There's no payload available");
+		// Keep the reusable buffer intact and just report a missing end position
+		return -1;
+	    };
+
+	    if (payloadByte != null) {
+		// ByteBuffer.wrap() is static - no need for the field bb here
+		int t = ByteBuffer.wrap(payloadByte).getInt();
+
+		log.trace("   |-> {}", t);
+		return t;
+	    };
+
+	}
+	catch (IOException e) {
+	    log.trace("IOException {}", e);	   
+	};
+	return -1;
+    };
+
+
+    private static final class EmptyElementSpans extends ElementSpans {
+
+	@Override
+	public boolean next() { return false; };
+
+	@Override
+	public boolean skipTo(int target) { return false; };
+
+	@Override
+	public int doc() { return DocIdSetIterator.NO_MORE_DOCS; };
+	
+	@Override
+	public int start() { return -1; };
+
+	@Override
+	public int end() { return -1; };
+
+	@Override
+	public Collection<byte[]> getPayload() { return null; };
+
+	@Override
+	public boolean isPayloadAvailable() { return false; };
+	
+	@Override
+	public long cost() { return 0; };
+    };
+    
+    public static final ElementSpans EMPTY_ELEMENT_SPANS = new EmptyElementSpans();
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapLongSpan.java b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapLongSpan.java
new file mode 100644
index 0000000..bd43681
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapLongSpan.java
@@ -0,0 +1,47 @@
+package de.ids_mannheim.korap.query.spans;
+
+import de.ids_mannheim.korap.query.spans.KorapSpan;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+public class KorapLongSpan extends KorapSpan {
+    public Collection<byte[]> payload;
+    
+    @Override
+    public Object clone() {
+	KorapLongSpan span = new KorapLongSpan();
+	span.start = this.start;
+	span.end = this.end;
+	span.doc = this.doc;
+	// The payload is not allocated by initPayload(), so guard against null
+	if (this.payload != null)
+	    span.payload = new ArrayList<byte[]>(this.payload);
+	return span;
+    };
+
+    public KorapSpan copyFrom (KorapLongSpan o) {
+	super.copyFrom((KorapSpan) o);
+	if (o.payload != null) {
+	    if (this.payload == null)
+		this.payload = new ArrayList<byte[]>(o.payload.size());
+	    this.payload.addAll(o.payload);
+	};
+	return this;
+    };
+
+    @Override
+    public void clearPayload () {
+	if (this.payload != null)
+	    this.payload.clear();
+    };
+
+    @Override
+    public void initPayload () {
+    };
+
+    @Override
+    public String toString () {
+	StringBuilder sb = new StringBuilder("[");
+	return sb.append(this.start).append('-')
+	    .append(this.end)
+	    .append('(').append(this.doc).append(')')
+	    //	    .append('$').append(this.payload.toString())
+	    .append(']')
+	    .toString();
+    };
+
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapSpan.java b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapSpan.java
new file mode 100644
index 0000000..c8a223e
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapSpan.java
@@ -0,0 +1,62 @@
+package de.ids_mannheim.korap.query.spans;
+
+import java.lang.StringBuilder;
+
+public abstract class KorapSpan implements Comparable<KorapSpan>, Cloneable {
+    public int start, end, doc;
+
+    public KorapSpan () {
+	this.start = -1;
+	this.end = -1;
+	initPayload();
+    };
+
+    public void clear () {
+	this.start = -1;
+	this.end = -1;
+	this.doc = -1;
+	clearPayload();
+    };
+
+    public KorapSpan copyFrom (KorapSpan o) {
+	this.start = o.start;
+	this.end = o.end;
+	this.doc = o.doc;
+	clearPayload();
+	return this;
+    };
+
+    public abstract void clearPayload ();
+    public abstract void initPayload ();
+
+    @Override
+    public int compareTo (KorapSpan o) {
+	/* optimizable for short numbers to return o.end - this.end */
+	if (this.doc < o.doc) {
+	    return -1;
+	}
+	else if (this.doc == o.doc) {
+	    if (this.start < o.start) {
+		return -1;
+	    }
+	    else if (this.start == o.start) {
+		if (this.end < o.end)
+		    return -1;
+	    };
+	};
+	return 1;
+    };
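+    // Note: spans are ordered by doc, then start, then end position; two
+    // identical spans compare as 1 and never as 0, so this ordering is not
+    // consistent with equals (see the todo below).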
+
+    public String toString () {
+	StringBuilder sb = new StringBuilder("[");
+	return sb.append(this.start).append('-')
+	    .append(this.end)
+	    .append('(').append(this.doc).append(')')
+	    .append(']')
+	    .toString();
+    };
+
+    /*
+      Todo: Implement equals and hashCode
+     */
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapTermSpan.java b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapTermSpan.java
new file mode 100644
index 0000000..7fc1e57
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/KorapTermSpan.java
@@ -0,0 +1,77 @@
+package de.ids_mannheim.korap.query.spans;
+
+import java.nio.ByteBuffer;
+import de.ids_mannheim.korap.query.spans.KorapSpan;
+
+// TODO: Store payloads in 12 bytes instead of the complicated ByteBuffer stuff!
+
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class KorapTermSpan extends KorapSpan {
+    public ByteBuffer payload;
+
+    private final Logger log = LoggerFactory.getLogger(KorapTermSpan.class);
+
+    @Override
+    public Object clone() {
+	KorapTermSpan span = new KorapTermSpan();
+	span.start = this.start;
+	span.end = this.end;
+	span.doc = this.doc;
+
+	this.payload.rewind();
+	span.payload.put(this.payload);
+
+	log.trace("Clone payload {} to payload {} ...",
+		  this.payload.toString(),
+		  span.payload.toString());
+	log.trace("... from {}-{} to {}-{}",
+		  this.startChar(),
+		  this.endChar(),
+		  span.startChar(),
+		  span.endChar());
+
+	return span;
+    };
+
+    public KorapSpan copyFrom (KorapTermSpan o) {
+	super.copyFrom((KorapSpan) o);
+	this.payload.put(o.payload);
+	return this;
+    };
+
+    @Override
+    public void clearPayload () {
+	if (this.payload != null) {
+	    this.payload.clear();
+	    this.payload.rewind();
+	};
+    };
+
+    @Override
+    public void initPayload () {
+	this.payload = ByteBuffer.allocate(128);
+    };
+
+
+    @Override
+    public String toString () {
+	StringBuilder sb = new StringBuilder("[");
+	return sb.append(this.start).append('-')
+	    .append(this.end)
+	    .append('(').append(this.doc).append(')')
+	    .append('$').append(this.payload.toString())
+	    .append(']')
+	    .toString();
+    };
+
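+    // The first eight payload bytes are assumed to hold the character offsets
+    // (bytes 0-3 = start character, bytes 4-7 = end character), matching the
+    // layout written by ElementSpans.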
+    public int startChar () {
+	return this.payload.getInt(0);
+    };
+
+    public int endChar () {
+	return this.payload.getInt(4);
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/spans/MatchModifySpans.java b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/MatchModifySpans.java
new file mode 100644
index 0000000..2d785fa
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/MatchModifySpans.java
@@ -0,0 +1,176 @@
+package de.ids_mannheim.korap.query.spans;
+
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+
+import java.io.IOException;
+
+import java.util.Map;
+import java.util.ArrayList;
+import java.util.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+
+public class MatchModifySpans extends Spans {
+    private List<byte[]> highlightedPayload;
+    private Collection<byte[]> payload;
+    private final Spans spans;
+    private byte number;
+    private boolean divide;
+    private ByteBuffer bb;
+
+    private SpanQuery highlight;
+    private final Logger log = LoggerFactory.getLogger(MatchModifySpans.class);
+
+    private int start = -1, end;
+    private int tempStart, tempEnd = 0;
+
+
+    public MatchModifySpans (SpanQuery highlight,
+		       AtomicReaderContext context,
+		       Bits acceptDocs,
+		       Map<Term,TermContext> termContexts,
+		       byte number,
+		       boolean divide) throws IOException {
+	spans = highlight.getSpans(context, acceptDocs, termContexts);
+	this.number = number;
+	this.divide = divide;
+	this.highlight = highlight;
+	this.highlightedPayload = new ArrayList<byte[]>(6);
+	bb = ByteBuffer.allocate(9);
+    };
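+    /*
+     * Assumed class payload layout, derived from the reads in next():
+     *   bytes 0-3 : start position
+     *   bytes 4-7 : end position
+     *   byte 8    : class number
+     */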
+
+    @Override
+    public Collection<byte[]> getPayload() throws IOException {
+	return highlightedPayload;
+    };
+
+    @Override
+    public boolean isPayloadAvailable() {
+	return highlightedPayload.isEmpty() == false;
+    };
+
+    public int doc() { return spans.doc(); }
+
+    // inherit javadocs
+    @Override
+    public int start() { return start; }
+
+    // inherit javadocs
+    @Override
+    public int end() { return end; }
+
+
+    // inherit javadocs
+    @Override
+    public boolean next() throws IOException {
+	log.trace("||> Forward next");
+
+	highlightedPayload.clear();
+
+	/*
+	  With divide:
+	  Is the buffer empty?
+	  If not, take the next match from the buffer!
+	*/
+
+	if (spans.next()) {
+	    start = -1;
+	    if (spans.isPayloadAvailable()) {
+		end = 0;
+
+		for (byte[] payload : spans.getPayload()) {
+		    bb.clear();
+		    bb.put(payload);
+		    //		    bb = ByteBuffer.wrap(payload, 0, 10);
+		    bb.position(8);
+
+		    // Todo: Implement Divide
+		    if (payload.length == 9 && bb.get() == this.number) {
+			bb.rewind();
+			tempStart = bb.getInt();
+			tempEnd = bb.getInt();
+
+			log.trace("Found matching class {}-{}", tempStart, tempEnd);
+
+			if (start == -1)
+			    start = tempStart;
+			else if (tempStart < start)
+			    start = tempStart;
+
+			if (tempEnd > end)
+			    end = tempEnd;
+		    }
+		    else {
+			log.trace("Remember old payload {}", payload);
+			highlightedPayload.add(payload);
+		    };
+		};
+
+		log.trace("All payload processed, now clean up");
+
+		if (start != -1) {
+		    int i = highlightedPayload.size() - 1;
+
+		    for (; i >= 0; i--) {
+			bb.clear();
+			bb.put(highlightedPayload.get(i),0,8);
+			bb.rewind();
+			if (bb.getInt() < start || bb.getInt() > end) {
+			    bb.rewind();
+			    log.trace("Remove highlight {} with {}-{} for {}-{}", i, bb.getInt(), bb.getInt(), start, end);
+			    highlightedPayload.remove(i);
+			    continue;
+			};
+ 			bb.rewind();
+			log.trace("Highlight {} will stay with {}-{} for {}-{}", i, bb.getInt(), bb.getInt(), start, end);
+		    };
+		    /*
+		     * Todo: SPLIT
+		     * Careful! With divide, payloads may have to be assigned
+		     * multiple times!
+		     */
+		};
+	    };
+
+
+	    if (start == -1) {
+		start = spans.start();
+		end = spans.end();
+	    }
+	    else {
+		log.trace("Start to shrink to {} - {} class: {}",
+			  start, end, number);
+	    };
+
+	    return true;
+	};
+	return false;
+    };
+
+    // inherit javadocs
+    @Override
+    public boolean skipTo(int target) throws IOException {
+	return spans.skipTo(target);
+    };
+
+    @Override
+    public String toString() {
+	return getClass().getName() + "(" + this.highlight.toString() + ")@" +
+	    (doc() + ":" + start() + "-" + end());
+    };
+
+
+    @Override
+    public long cost() {
+	return spans.cost();
+    }
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/spans/NextSpans.java b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/NextSpans.java
new file mode 100644
index 0000000..b6080fe
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/NextSpans.java
@@ -0,0 +1,408 @@
+package de.ids_mannheim.korap.query.spans;
+
+/* Inspired by NearSpansOrdered
+ *
+ * REIMPLEMENTATION
+ *
+ */
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.SpanQuery;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import de.ids_mannheim.korap.query.SpanNextQuery;
+
+// Todo: Disable the option to discard payloads
+
+import java.util.*;
+import java.io.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** From Spans.java:
+ * Expert: an enumeration of span matches.  Used to implement span searching.
+ * Each span represents a range of term positions within a document.  Matches
+ * are enumerated in order, by increasing document number, within that by
+ * increasing start position and finally by increasing end position. */
+public class NextSpans extends Spans {
+    private boolean firstTime = true;
+    private boolean more = false;
+
+    // Initialize as invalid
+    private int matchDoc   = -1;
+    private int matchStart = -1;
+    private int matchEnd   = -1;
+
+    /** Indicates that both spans have the same doc() */
+    private boolean inSameDoc = false;
+
+    // First span
+    private final Spans firstSpans;
+    private final Spans firstSpansByDoc;
+
+    // Second span
+    private final Spans secondSpans;
+    private final Spans secondSpansByDoc;
+
+    private SpanNextQuery query;
+
+    private List<byte[]> matchPayload;
+    private boolean collectPayloads = true;
+
+    private final static Logger log = LoggerFactory.getLogger(NextSpans.class);
+
+    // Constructor
+    public NextSpans (SpanNextQuery spanNextQuery,
+		      AtomicReaderContext context,
+		      Bits acceptDocs,
+		      Map<Term,TermContext> termContexts) throws IOException {
+	this(spanNextQuery, context, acceptDocs, termContexts, true);
+    };
+
+    // Constructor
+    public NextSpans (SpanNextQuery spanNextQuery,
+		      AtomicReaderContext context,
+		      Bits acceptDocs,
+		      Map<Term,TermContext> termContexts,
+		      boolean collectPayloads) throws IOException {
+
+	log.trace("Init NextSpans");
+
+	//	this.collectPayloads = collectPayloads;
+
+	// Init copies
+	matchPayload = new LinkedList<byte[]>();
+
+	firstSpans = spanNextQuery.firstClause().getSpans(
+	    context, acceptDocs, termContexts
+        );
+	firstSpansByDoc = firstSpans; // used in toSameDoc()
+
+	secondSpans = spanNextQuery.secondClause().getSpans(
+            context, acceptDocs, termContexts
+        );
+	secondSpansByDoc = secondSpans; // used in toSameDoc()
+
+	/*
+	if (DEBUG) {
+	    System.err.println("***");
+	    while (subSpans[i].next()) {
+		StringBuffer payloadString = new StringBuffer();
+		int docid = subSpans[i].doc();
+		System.err.println("Span: "+i+" Doc: " + docid + " with " + subSpans[i].start() + "-" + subSpans[i].end() + " || " + payloadString.toString());
+	    };
+	};
+	*/
+	query = spanNextQuery; // kept for toString() only.
+    };
+
+
+    /** Move to the next match, returning true iff any such exists. */
+    @Override
+    public boolean next () throws IOException {
+	log.trace("Next with doc {}", matchDoc);
+
+	// Check for init next
+	if (firstTime) {
+	    log.trace("First retrieval of NextSpans");
+	    firstTime = false;
+	    if (!firstSpans.next() || !secondSpans.next()) {
+		log.trace("No next in firstSpan nor in secondSpan");
+		more = false;
+		return false;
+	    };
+	    log.trace("Spans are initialized");
+	    more = true;
+	};
+
+	//	if (collectPayloads)
+	    matchPayload.clear();
+
+	return advance();
+    };
+
+
+    /** Skips to the first match beyond the current, whose document number is
+     * greater than or equal to <i>target</i>. <p>Returns true iff there is such
+     * a match.  <p>Behaves as if written: <pre class="prettyprint">
+     *   boolean skipTo(int target) {
+     *     do {
+     *       if (!next())
+     *         return false;
+     *     } while (target > doc());
+     *     return true;
+     *   }
+     * </pre>
+     * Most implementations are considerably more efficient than that.
+     */
+    public boolean skipTo (int target) throws IOException {
+	log.trace("skipTo {}", target);
+
+	// Check for init next
+	if (firstTime) {
+	    firstTime = false;
+	    // Both spans have to be initialized, as in next()
+	    if (!firstSpans.next() || !secondSpans.next()) {
+		more = false;
+		return false;
+	    };
+	    more = true;
+	}
+
+	// There are more spans, but the doc has to be skipped to target
+	// Warning: This only skips firstSpans!
+	//          Maybe that's wrong ...
+	else if (more && (firstSpans.doc() < target)) {
+	    if (firstSpans.skipTo(target)) {
+		inSameDoc = false;
+	    }
+
+	    else {
+		more = false;
+		return false;
+	    };
+	};
+
+	//	if (collectPayloads)
+	    matchPayload.clear();
+
+	return advance();
+    };
+
+
+    /** Advance the subSpans to the same document */
+    private boolean toSameDoc() throws IOException {
+	log.trace("toSameDoc");
+
+	if (firstSpansByDoc.doc() < secondSpansByDoc.doc()) {
+	    if (!firstSpansByDoc.skipTo(secondSpansByDoc.doc())) {
+		more = false;
+		inSameDoc = false;
+		return false;
+	    };
+	}
+	else if (firstSpansByDoc.doc() > secondSpansByDoc.doc()) {
+	    if (!secondSpansByDoc.skipTo( firstSpansByDoc.doc() )) {
+		more = false;
+		inSameDoc = false;
+		return false;
+	    };
+	};
+	inSameDoc = true;
+	return true;
+    };
+
+
+    /** Advances the subSpans to just after an ordered match with a minimum slop
+     * that is smaller than the slop allowed by the SpanNearQuery.
+     * @return true iff there is such a match.
+     */
+    private boolean advance() throws IOException {
+	log.trace("advance");
+	boolean match = false;
+
+	// There are more spans, and both spans are either in the
+	// same doc or can be forwarded to the same doc.
+	while (more && (inSameDoc || toSameDoc())) {
+
+	    log.trace("More spans in the same Doc: {}", firstSpansByDoc.doc());
+	    
+	    /* spans are in the same doc and in the correct order next to each other */
+	    if (match()) {
+
+		// start and end position of last span
+		matchStart = firstSpans.start();
+		matchEnd = secondSpans.end();
+
+		log.trace("Matching: {}-{}", matchStart, matchEnd);
+
+		log.trace("Check for payloads");
+
+
+		//		if (collectPayloads) {
+		    log.trace("copy payloads");
+
+		    if (firstSpans.isPayloadAvailable()) {
+			Collection<byte[]> payload = firstSpans.getPayload();
+			log.trace("Found {} payloads in firstSpans", payload.size());
+			matchPayload.addAll(payload);
+		    };
+		    if (secondSpans.isPayloadAvailable()) {
+			Collection<byte[]> payload = secondSpans.getPayload();
+			log.trace("Found {} payloads in secondSpans", payload.size());
+			matchPayload.addAll(payload);
+		    };
+		    //		};
+
+		log.trace("=> MATCH");
+		match = true;
+		break;
+	    };
+	};
+
+	log.trace("Forward secondSpans");
+	if (!secondSpans.next()) {
+	    log.trace("No more secondSpans");
+	    more = false;
+	};
+	inSameDoc = false;
+	return match;
+    };
+
+
+    /** Returns the document number of the current match.  Initially invalid. */
+    @Override
+    public int doc () {
+	return matchDoc;
+    };
+
+    /** Returns the start position of the current match.  Initially invalid. */
+    @Override
+    public int start () {
+	return matchStart;
+    };
+
+    /** Returns the end position of the current match.  Initially invalid. */
+    @Override
+    public int end () {
+	return matchEnd;
+    };
+
+    /**
+     * Returns the payload data for the current span.
+     * This is invalid until {@link #next()} is called for
+     * the first time.
+     * This method must not be called more than once after each call
+     * of {@link #next()}. However, most payloads are loaded lazily,
+     * so if the payload data for the current position is not needed,
+     * this method may not be called at all for performance reasons. An ordered
+     * SpanQuery does not lazy load, so if you have payloads in your index and
+     * you do not want ordered SpanNearQuerys to collect payloads, you can
+     * disable collection with a constructor option.<br>
+     * <br>
+     * Note that the return type is a collection, thus the ordering should not be relied upon.
+     * <br/>
+     * @lucene.experimental
+     *
+     * @return a List of byte arrays containing the data of this payload, otherwise null if isPayloadAvailable is false
+     * @throws IOException if there is a low-level I/O error
+     */
+    // public abstract Collection<byte[]> getPayload() throws IOException;
+    @Override
+    public Collection<byte[]> getPayload() throws IOException {
+	log.trace("Payload is requested with payload count {}", matchPayload.size());
+	return matchPayload;
+    };
+    
+
+    /**
+     * Checks if a payload can be loaded at this position.
+     * <p/>
+     * Payloads can only be loaded once per call to
+     * {@link #next()}.
+     *
+     * @return true if there is a payload available at this position that can be loaded
+     */
+    @Override
+    public boolean isPayloadAvailable() {
+	log.trace("Check for payload emptyness: {}", matchPayload.isEmpty());
+
+	return matchPayload.isEmpty() == false;
+    };
+
+
+    // Todo: This may be in the wrong version
+    @Override
+    public long cost() {
+	return Math.min(firstSpans.cost(), secondSpans.cost());
+    };
+
+
+    @Override
+    public String toString() {
+	return getClass().getName() + "("+query.toString()+")@"+
+	    (firstTime?"START":(more?(doc()+":"+start()+"-"+end()):"END"));
+    };
+
+
+    public boolean match () throws IOException {
+	matchDoc = firstSpans.doc();
+	log.trace("Check for next match");
+
+	byte check;
+	while (inSameDoc && ((check = docNext(firstSpans, secondSpans)) != (byte) 0)) {
+
+	    log.trace("There's no match");
+
+	    if ((check == (byte) -1) && !secondSpans.next()) {
+		log.trace("No more secondSpans");
+		inSameDoc = false;
+		more = false;
+		break;
+	    }
+	    else if (check == (byte) 1 && !firstSpans.next()) {
+		log.trace("No more firstSpans");
+		inSameDoc = false;
+		more = false;
+		break;
+	    }
+	    else if (matchDoc != secondSpans.doc()) {
+		log.trace("secondSpans has another doc");
+		inSameDoc = false;
+		break;
+	    };
+	};
+	return inSameDoc;
+    };
+
+
+    /** Check whether two Spans in the same document are immediately
+     * next to each other.
+     * @return 0 iff spans1 ends exactly where spans2 starts (a match),
+     *         -1 iff secondSpans (spans2) has to be forwarded,
+     *         1 iff firstSpans (spans1) has to be forwarded.
+     */
+    static final byte docNext (Spans spans1, Spans spans2) {
+	// Check the relative order of the two spans
+	int start1 = spans1.start();
+	int start2 = spans2.start();
+
+	//	boolean val = (start1 == start2) ? (spans1.end() < spans2.end()) : (start1 < start2);
+	byte val;
+	if (start1 >= start2) {
+	    val = (byte) -1;
+	}
+	else {
+	    int end1 = spans1.end();
+	    if (end1 == start2) {
+		val = (byte) 0;
+	    }
+	    else if (end1 > start2) {
+		val = (byte) -1;
+	    }
+	    else {
+		val = (byte) 1;
+	    };
+	}
+	// -1: forward secondSpans
+	// 1: forward firstSpans
+
+	log.trace("{}-{} next to {}-{}", start1, spans1.end(), start2, spans2.end());
+	log.trace("docSpansOrdered: {}", val);
+
+	return val;
+    };
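+    /* Examples for docNext (hypothetical positions):
+     *   spans1 = [3-5], spans2 = [5-8] ->  0 (adjacent, a match)
+     *   spans1 = [3-5], spans2 = [7-8] ->  1 (forward firstSpans)
+     *   spans1 = [3-5], spans2 = [4-8] -> -1 (forward secondSpans)
+     */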
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/spans/WithinSpans.java b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/WithinSpans.java
new file mode 100644
index 0000000..a2e4b86
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/spans/WithinSpans.java
@@ -0,0 +1,718 @@
+package de.ids_mannheim.korap.query.spans;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.DocIdSetIterator;
+
+import java.nio.ByteBuffer;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import de.ids_mannheim.korap.query.SpanWithinQuery;
+import de.ids_mannheim.korap.query.spans.KorapLongSpan;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.io.*;
+
+/*
+<a>x<b>y<c>z[1]</c>z[2]</b>z[3]</a>
+
+First:
+a with ?
+-> fetch empty
+-> [].next -> 1
+=> match!
+-> store [1]
+a with ?
+-> fetch checked! (HOW DO I REMEMBER THAT?)
+-> [].next -> 1
+=> match!
+-> store [2]
+a with [3]
+=> match!
+-> store [3]
+b with ?
+-> fetch [1]
+=> match!
+-> if [1].end <= b.end
+   -> store [1]
+b with ?
+-> fetch [2]
+=> match!
+-> if [2].end <= b.end
+   -> store [2]
+
+Store: start, end, payload, wrapStart, wrapEnd
+
+Problem: It is not guaranteed that for
+<a><b><c>x</c>y</b>z</a>
+the wrap order a,b,c comes out!
+*/
+
+public class WithinSpans extends Spans {
+    private boolean firstTime = true;
+    private boolean more = false;
+
+    // Initialize as invalid
+    private int matchDoc   = -1;
+    private int matchStart = -1;
+    private int matchEnd   = -1;
+
+    /** Indicates that the wrap and the embedded spans are in the same doc */
+    private boolean inSameDoc = false;
+    private int wrapDoc;
+    private int embeddedDoc;
+    private int wrapStart, wrapEnd, embeddedStart, embeddedEnd;
+    private Collection<byte[]> embeddedPayload;
+
+    // Wrap span
+    private final Spans wrapSpans;
+    private final Spans wrapSpansByDoc; // Necessary?
+
+    // Embedded span
+    private final Spans embeddedSpans;
+    private final Spans embeddedSpansByDoc;
+
+    private SpanWithinQuery query;
+
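+    // Two temporary stores for embedded spans (as the control flow in within()
+    // and fetchTwoNext() suggests): spanStore1 collects embedded spans already
+    // consumed for the current wrapping span, while spanStore2 queues spans
+    // that still have to be checked against the following wrapping span.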
+    private LinkedList<KorapLongSpan> spanStore1, spanStore2;
+
+    private List<byte[]> matchPayload;
+
+    private short flag;
+
+    private final Logger log = LoggerFactory.getLogger(WithinSpans.class);
+
+    // Constructor
+    public WithinSpans (SpanWithinQuery spanWithinQuery,
+			AtomicReaderContext context,
+			Bits acceptDocs,
+			Map<Term,TermContext> termContexts,
+			short flag) throws IOException {
+
+	log.trace("Init WithinSpans");
+
+	// Init copies
+	this.matchPayload = new LinkedList<byte[]>();
+
+	// Get span
+	this.wrapSpans = spanWithinQuery.wrap().getSpans(context, acceptDocs, termContexts);
+	this.wrapSpansByDoc = wrapSpans; // used in toSameDoc()
+
+	this.embeddedSpans = spanWithinQuery.embedded().getSpans(context, acceptDocs, termContexts);
+	this.embeddedSpansByDoc = embeddedSpans; // used in toSameDoc()
+
+	this.flag = flag;
+
+	this.spanStore1 = new LinkedList<KorapLongSpan>();
+	this.spanStore2 = new LinkedList<KorapLongSpan>();
+	
+	this.query = spanWithinQuery; // kept for toString() only.
+    };
+
+
+    /** Move to the next match, returning true iff any such exists. */
+    @Override
+    public boolean next () throws IOException {
+	log.trace("Next with doc {}", matchDoc);
+
+	// Check for init next
+	if (firstTime) {
+	    firstTime = false;
+	    if (!embeddedSpans.next() || !wrapSpans.next()) {
+		log.trace("No next in firstSpan nor in secondSpan 1");
+		more = false;
+		return false;
+	    };
+	    log.trace("Spans are initialized");
+	    more = true;
+	    wrapStart = wrapSpans.start();
+	    wrapEnd = wrapSpans.end();
+	    wrapDoc = matchDoc = wrapSpans.doc();
+
+	    embeddedStart = embeddedSpans.start();
+	    embeddedEnd = embeddedSpans.end();
+	    embeddedDoc = embeddedSpans.doc();
+
+	    if (embeddedSpans.isPayloadAvailable()) {
+		Collection<byte[]> payload = embeddedSpans.getPayload();
+		embeddedPayload = new ArrayList<byte[]>(payload.size());
+		embeddedPayload.addAll(payload);
+	    };
+
+	    log.trace("Init spans: {}", _actPos());
+
+	    if (embeddedDoc == matchDoc) {
+		inSameDoc = true;
+		log.trace("Is now inSameDoc");
+	    }
+	    else {
+		log.trace("Is not inSameDoc");
+	    };
+	    log.trace("Next with doc {} (wrap) and {} (embedded)", wrapDoc, embeddedDoc);
+	};
+
+	matchPayload.clear();
+	return advanceAfterCheck();
+    };
+
+
+    /** Advances the subSpans to just after a within match.
+     * @return true iff there is such a match.
+     */
+    private boolean advanceAfterCheck() throws IOException {
+	log.trace("advanceAfterChecked inSameDoc: {} and more: {}", inSameDoc, more);
+	log.trace("advanceAfterCheck with doc {} (wrap) and {} (embedded)", wrapDoc, embeddedDoc);
+
+	// There are more spans, and both spans are either in the
+	// same doc or can be forwarded to the same doc.
+	while (more && (inSameDoc || toSameDoc())) {
+
+	    log.trace("There are more spans in doc {}", embeddedDoc);
+	    
+	    /* spans are in the same doc */
+	    if (within()) {
+		return true;
+	    }
+	    else {
+		log.trace("No within");
+	    };
+	};
+
+	log.trace("No more matches");
+
+	return false; // no more matches
+    };
+
+
+    /** Advance the subSpans to the same document */
+    private boolean toSameDoc () throws IOException {
+	log.trace("toSameDoc");
+
+	/*
+	wrapDoc = wrapSpansByDoc.doc();
+	embeddedDoc = embeddedSpansByDoc.doc();
+
+	*/
+
+	if (wrapDoc != embeddedDoc) {
+	    log.trace("Docs not identical: {} vs {}", wrapDoc, embeddedDoc);
+
+	    spanStore1.clear(); // = new LinkedList<KorapLongSpan>();
+	    spanStore2.clear(); // = new LinkedList<KorapLongSpan>();
+
+	    if (wrapDoc < embeddedDoc) {
+		log.trace("Skip wrap from {} to {}", wrapDoc, embeddedDoc);
+		if (!wrapSpansByDoc.skipTo(embeddedDoc)) {
+		    more = false;
+		    inSameDoc = false;
+		    return false;
+		};
+		wrapDoc = wrapSpans.doc();
+	    }
+	    else if (wrapDoc > embeddedDoc) {
+		log.trace("Skip embedded from {} to {}", embeddedSpans.doc(), wrapDoc);
+		//		if (!embeddedSpansByDoc.skipTo( wrapDoc )) {
+		if (wrapDoc != embeddedSpans.doc()) {
+		    if (embeddedSpans.doc() == DocIdSetIterator.NO_MORE_DOCS || !embeddedSpans.skipTo( wrapDoc )) {
+			more = false;
+			inSameDoc = false;
+			return false;
+		    };
+		}
+		else {
+		    _add_current();
+		    //		    embeddedDoc = embeddedSpans.doc();
+		};
+	    };
+	}
+	else {
+	    log.trace("Docs identical");
+	};
+	embeddedStart = embeddedSpans.start();
+	embeddedEnd = embeddedSpans.end();
+	log.trace("The new embedded start is {}-{}", embeddedStart, embeddedEnd);
+	inSameDoc = true;
+	return true;
+    };
+
+
+    /** Skips to the first match beyond the current, whose document number is
+     * greater than or equal to <i>target</i>. <p>Returns true iff there is such
+     * a match.  <p>Behaves as if written: <pre class="prettyprint">
+     *   boolean skipTo(int target) {
+     *     do {
+     *       if (!next())
+     *         return false;
+     *     } while (target > doc());
+     *     return true;
+     *   }
+     * </pre>
+     * Most implementations are considerably more efficient than that.
+     */
+    public boolean skipTo (int target) throws IOException {
+	log.trace("skipTo {}", target);
+
+	// Check for init next
+	if (firstTime) {
+	    firstTime = false;
+	    if (!embeddedSpans.next() || !wrapSpans.next()) {
+		log.trace("No next in firstSpan nor in secondSpan 2");
+		more = false;
+		return false;
+	    };
+	    more = true;
+	    wrapStart = wrapSpans.start();
+	    wrapEnd = wrapSpans.end();
+	    wrapDoc = wrapSpans.doc();
+	    embeddedStart = embeddedSpans.start();
+	    embeddedEnd = embeddedSpans.end();
+	    embeddedDoc = embeddedSpans.doc();
+	}
+
+	/*
+	  See NextSpans for the same problem!
+	  This should be dealt with in toSameDoc!!!
+	 */
+	else if (more && (embeddedSpans.doc() < target)) {
+	    if (embeddedSpans.skipTo(target)) {
+		inSameDoc = false;
+		embeddedDoc = target;
+	    }
+
+	    // Can't be skipped to target
+	    else {
+		more = false;
+		return false;
+	    };
+	};
+
+	matchPayload.clear();
+	return advanceAfterCheck();
+    };
+
+    private String _actPos () {
+	StringBuilder sb = new StringBuilder();
+	sb.append("<").append(wrapStart).append('-').append(wrapEnd).append('>');
+	sb.append(embeddedStart).append('-').append(embeddedEnd);
+	sb.append("</>");
+	return sb.toString();
+    };
+
+
+    private boolean within () throws IOException {
+	log.trace("within");
+	
+	while (more && inSameDoc) {
+
+	    // Case 1-5
+	    // Case 1
+	    //     |---|
+	    // |-|
+	    // Case 2
+	    //   |---|
+	    // |-|
+	    // Case 3
+	    //   |---|
+	    // |---|
+	    // Case 4
+	    //   |-|
+	    // |---|
+	    // Case 5
+	    //  |-|"
+	    // |---|
+	    if (wrapStart > embeddedStart) {
+		log.trace("[Case] 1-5 with {}", _actPos());
+
+		if (this.fetchNext()) {
+		    continue;
+		};
+
+		// Forward wrapSpan
+		if (wrapSpans.next()) {
+		    wrapDoc = wrapSpans.doc();
+		    if (this.toSameDoc()) {
+			wrapStart = wrapSpans.start();
+			wrapEnd = wrapSpans.end();
+			continue;
+		    };
+		};
+
+		this.more = false;
+		this.inSameDoc = false;
+		return false;
+	    };
+
+	    // Get wrapEnd
+	    // wrapEnd = wrapSpans.end();
+
+	    KorapLongSpan embedded = new KorapLongSpan();
+	    embedded.start = embeddedStart;
+	    embedded.end = embeddedEnd;
+	    embedded.doc = embeddedDoc;
+	    if (embeddedPayload != null)
+		embedded.payload = embeddedPayload;
+
+	    this.spanStore1.add(embedded);
+	    log.trace("pushed to spanStore1: {}", embedded.toString());
+
+
+	    // Case 12
+	    // |---|
+	    //     |-|
+	    // Case 13
+	    // |---|
+	    //       |-|
+	    if (wrapEnd <= embeddedStart) {
+		log.trace("[Case] 12-13 with {}", _actPos());
+
+		// Copy content of spanStores
+		if (!spanStore1.isEmpty()) {
+		    log.trace("First store is not empty - copy to second store!");
+		    spanStore2.addAll(0, (LinkedList<KorapLongSpan>) spanStore1.clone());
+		    spanStore1.clear();
+		    log.trace("Second store is now: {}", spanStore2.toString());
+		};
+
+		// Forward wrapSpan
+		log.trace("Try to forward wrapspan");
+
+		if (wrapSpans.next()) {
+		    wrapDoc = wrapSpans.doc();
+		    log.trace("wrapDoc is now {} while embeddedDoc is {}", wrapDoc, embeddedDoc);
+		    if (this.toSameDoc()) {
+			wrapStart = wrapSpans.start();
+			wrapEnd = wrapSpans.end();
+			if (fetchTwoNext())
+			    continue;
+		    };
+		}
+		else {
+		    log.trace("Unable to forward wrapspan");
+		};
+
+		this.inSameDoc = false;
+		this.more = false;
+		return false;
+	    }
+
+
+	    // Case 6 - 8
+	    else if (wrapStart == embeddedStart) {
+
+		// Case 6
+		// |---|
+		// |-|
+		if (wrapEnd > embeddedEnd) {
+		    log.trace("[Case] 6 with {}", _actPos());
+
+		    // neither match nor endWith
+		    if (this.flag < (short) 2) {
+			_setMatch(embedded);
+			log.trace("MATCH1!! with {}", _actPos());
+			fetchTwoNext();
+			return true;
+		    };
+
+		    fetchTwoNext();
+		    continue;
+		}
+
+		// Case 7
+		// |---|
+		// |---|
+		else if (wrapEnd == embeddedEnd) {
+		    log.trace("[Case] 7 with {}", _actPos());
+
+		    _setMatch(embedded);
+		    log.trace("MATCH2!! with {}", _actPos());
+		    fetchTwoNext();
+		    return true;
+		};
+
+		// Case 8
+		// |-|
+		// |---|
+		// wrapEnd < embeddedEnd
+		log.trace("[Case] 8 with {}", _actPos());
+		fetchTwoNext();
+		continue;
+	    };
+
+	    // Case 9-11
+	    // wrapStart < wrapEnd
+
+	    // Case 9
+	    // |---|
+	    //  |-|
+	    if (wrapEnd > embeddedEnd) {
+		log.trace("[Case] 9 with {}", _actPos());
+
+		// neither match nor endWith
+		if (this.flag == (short) 0) {
+		    _setMatch(embedded);
+		    log.trace("MATCH3!! with {}", _actPos());
+		    fetchTwoNext();
+		    return true;
+		};
+
+		fetchTwoNext();
+		continue;
+	    }
+	    // Case 10
+	    // |---|
+	    //   |-|
+	    else if (wrapEnd == embeddedEnd) {
+		log.trace("[Case] 10 with {}", _actPos());
+
+		// neither match nor endWith
+		if (this.flag == (short) 0 || this.flag == (short) 2) {
+		    _setMatch(embedded);
+		    log.trace("MATCH4!! with {}", _actPos());
+		    fetchTwoNext();
+		    return true;
+		};
+
+		fetchTwoNext();
+		continue;
+	    };
+
+	    // Case 11
+	    // |---|
+	    //   |---|
+	    // wrapEnd < embeddedEnd
+	    log.trace("[Case] 11 with {}", _actPos());
+	    fetchTwoNext();
+	    continue;
+	};
+
+	this.more = false;
+	return false;
+    };
+
+
+    private boolean fetchNext () throws IOException {
+
+	// Fetch span from first store
+	if (spanStore1.isEmpty()) {
+	    log.trace("First store is empty");
+	    return fetchTwoNext();
+	};
+
+	KorapLongSpan current = spanStore1.removeFirst();
+	log.trace("Fetch from first store: {}", current.toString());
+
+	embeddedStart = current.start;
+	embeddedEnd = current.end;
+	embeddedDoc = current.doc;
+	if (current.payload != null)
+	    embeddedPayload = current.payload;
+
+	return true;
+    };
+
+
+    private boolean fetchTwoNext () throws IOException {
+
+	// Fetch span from second store
+	if (spanStore2.isEmpty()) {
+	    log.trace("Second store is empty");
+
+	    // Forward spans
+	    if (this.embeddedSpans.next()) {
+		log.trace("Forwarded embeddedSpans");
+
+		if (this.embeddedSpans.doc() != wrapDoc && !spanStore1.isEmpty()) {
+
+		    log.trace("No docmatch and still stuff in store");
+		    log.trace("First store is not empty - copy to second store!");
+		    spanStore2.addAll(0, (LinkedList<KorapLongSpan>) spanStore1.clone());
+		    spanStore1.clear();
+
+		    _add_current();
+
+		    log.trace("Second store is now: {}", spanStore2.toString());
+		}
+		else {
+		    embeddedStart = embeddedSpans.start();
+		    embeddedEnd = embeddedSpans.end();
+		    embeddedDoc = embeddedSpans.doc();
+
+		    if (embeddedSpans.isPayloadAvailable()) {
+			Collection<byte[]> payload = embeddedSpans.getPayload();
+			// Maybe just clear
+			embeddedPayload = new ArrayList<byte[]>(payload.size());
+			embeddedPayload.addAll(payload);
+		    };
+
+		    return this.toSameDoc();
+		};
+	    }
+	    else {
+		log.trace("Forwarded embeddedSpans failed");
+	    };
+
+	    log.trace("EmbeddedDoc: " + embeddedDoc);
+
+	    // Forward wrapSpan
+	    log.trace("Try to forward wrapspan");
+	    if (wrapSpans.next()) {
+		wrapDoc = wrapSpans.doc();
+		if (this.toSameDoc()) {	    
+		    wrapStart = wrapSpans.start();
+		    wrapEnd = wrapSpans.end();
+
+		    log.trace("WrapSpan forwarded");
+
+		    // Copy content of spanStores
+		    if (!spanStore1.isEmpty()) {
+			log.trace("First store is not empty - copy to second store!");
+			spanStore2.addAll(0, (LinkedList<KorapLongSpan>) spanStore1.clone());
+			spanStore1.clear();
+			log.trace("Second store is now: {}", spanStore2.toString());
+		    };
+
+		    return this.fetchTwoNext();
+		};
+	    };
+
+	    // Don't know.
+	    log.trace("No more fetchNext()");
+
+	    more = false;
+	    return false;
+	};
+
+	KorapLongSpan current = spanStore2.removeFirst();
+	log.trace("Fetch from second store: {}", current.toString());
+
+	embeddedStart = current.start;
+	embeddedEnd = current.end;
+	embeddedDoc = current.doc;
+	embeddedPayload = current.payload;
+
+	return true;
+    };
+
+
+    /*
+TODO: Maybe ignore "embedded" parameter and use embeddedPayload directly
+     */
+    private void _setMatch (KorapLongSpan embedded) throws IOException {
+	matchStart = wrapStart;
+	matchEnd = wrapEnd;
+	matchDoc = embeddedDoc;
+	matchPayload.clear();
+
+	if (embedded.payload != null)
+	    matchPayload.addAll(embedded.payload);
+
+	if (wrapSpans.isPayloadAvailable()) {
+	    Collection<byte[]> payload = wrapSpans.getPayload();
+	    matchPayload.addAll(payload);
+	};
+    };
+
+
+    private void _add_current () throws IOException{
+	KorapLongSpan embedded = new KorapLongSpan();
+	embedded.start = embeddedSpans.start();
+	embedded.end = embeddedSpans.end();
+	embedded.doc = embeddedSpans.doc();
+
+	if (embeddedSpans.isPayloadAvailable()) {
+	    Collection<byte[]> payload = embeddedSpans.getPayload();
+	    embedded.payload = new ArrayList<byte[]>(payload.size());
+	    embedded.payload.addAll(payload);
+	};
+
+	this.spanStore2.add(embedded);	    
+	log.trace("pushed to spanStore2: {}", embedded.toString());  
+    };
+
+
+    /** Returns the document number of the current match.  Initially invalid. */
+    @Override
+    public int doc () {
+	return matchDoc;
+    };
+
+    /** Returns the start position of the embedding wrap.  Initially invalid. */
+    @Override
+    public int start () {
+	return matchStart;
+    };
+
+    /** Returns the end position of the embedding wrap.  Initially invalid. */
+    @Override
+    public int end () {
+	return matchEnd;
+    };
+
+    /**
+     * Returns the payload data for the current span.
+     * This is invalid until {@link #next()} is called for
+     * the first time.
+     * This method must not be called more than once after each call
+     * of {@link #next()}. However, most payloads are loaded lazily,
+     * so if the payload data for the current position is not needed,
+     * this method may not be called at all for performance reasons. An ordered
+     * SpanQuery does not lazy load, so if you have payloads in your index and
+     * you do not want ordered SpanNearQuerys to collect payloads, you can
+     * disable collection with a constructor option.<br>
+     * <br>
+     * Note that the return type is a collection, thus the ordering should not be relied upon.
+     * <br/>
+     * @lucene.experimental
+     *
+     * @return a List of byte arrays containing the data of this payload, otherwise null if isPayloadAvailable is false
+     * @throws IOException if there is a low-level I/O error
+     */
+    // public abstract Collection<byte[]> getPayload() throws IOException;
+    @Override
+    public Collection<byte[]> getPayload() throws IOException {
+	return matchPayload;
+    };
+    
+
+    /**
+     * Checks if a payload can be loaded at this position.
+     * <p/>
+     * Payloads can only be loaded once per call to
+     * {@link #next()}.
+     *
+     * @return true if there is a payload available at this position that can be loaded
+     */
+    @Override
+    public boolean isPayloadAvailable() {
+	return matchPayload.isEmpty() == false;
+    };
+
+    // Todo: This may be in the wrong version
+    @Override
+    public long cost() {
+	return Math.min(wrapSpans.cost(), embeddedSpans.cost());
+    };
+
+    @Override
+    public String toString() {
+	return getClass().getName() + "("+query.toString()+")@"+
+	    (firstTime?"START":(more?(doc()+":"+start()+"-"+end()):"END"));
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanAlterQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanAlterQueryWrapper.java
new file mode 100644
index 0000000..ec2d254
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanAlterQueryWrapper.java
@@ -0,0 +1,59 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import de.ids_mannheim.korap.query.wrap.SpanRegexQueryWrapper;
+import de.ids_mannheim.korap.query.wrap.SpanSegmentQueryWrapper;
+import de.ids_mannheim.korap.query.wrap.SpanQueryWrapperInterface;
+
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.SpanOrQuery;
+import org.apache.lucene.index.Term;
+
+import java.util.*;
+
+public class SpanAlterQueryWrapper implements SpanQueryWrapperInterface {
+    private String field;
+    private SpanQuery query;
+    private List<SpanQuery> alternatives;
+
+    public SpanAlterQueryWrapper (String field) {
+	this.field = field;
+	this.alternatives = new ArrayList<>();
+    };
+
+    public SpanAlterQueryWrapper (String field, String ... terms) {
+	this.field = field;
+	this.alternatives = new ArrayList<>();
+	for (String term : terms) {
+	    this.alternatives.add(new SpanTermQuery(new Term(this.field, term)));
+	};
+    };
+
+    public SpanAlterQueryWrapper or (String term) {
+	this.alternatives.add(new SpanTermQuery(new Term(this.field, term)));
+	return this;
+    };
+
+    public SpanAlterQueryWrapper or (SpanQueryWrapperInterface term) {
+	this.alternatives.add( term.toQuery() );
+	return this;
+    };
+
+    public SpanAlterQueryWrapper or (SpanRegexQueryWrapper term) {
+	this.alternatives.add( term.toQuery() );
+	return this;
+    };
+
+    public SpanQuery toQuery() {
+	if (this.alternatives.size() == 1) {
+	    return (SpanQuery) this.alternatives.get(0);
+	};
+
+	Iterator<SpanQuery> clause = this.alternatives.iterator();
+	SpanOrQuery soquery = new SpanOrQuery( clause.next() );
+	while (clause.hasNext()) {
+	    soquery.addClause( clause.next() );
+	};
+	return (SpanQuery) soquery;
+    };
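+    /* Usage sketch (hypothetical field and term names):
+     *   SpanAlterQueryWrapper alter =
+     *       new SpanAlterQueryWrapper("tokens", "s:tree", "s:trees");
+     *   SpanQuery q = alter.or("s:Tree").toQuery();
+     * A single alternative is returned directly, otherwise the alternatives
+     * are combined in a SpanOrQuery.
+     */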
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanClassQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanClassQueryWrapper.java
new file mode 100644
index 0000000..312c005
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanClassQueryWrapper.java
@@ -0,0 +1,41 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import org.apache.lucene.search.spans.SpanQuery;
+
+import de.ids_mannheim.korap.query.SpanClassQuery;
+import de.ids_mannheim.korap.query.wrap.SpanQueryWrapperInterface;
+
+import java.util.*;
+
+
+public class SpanClassQueryWrapper implements SpanQueryWrapperInterface {
+    private SpanQueryWrapperInterface subquery;
+    private byte number = (byte) 0;
+
+    public SpanClassQueryWrapper (SpanQueryWrapperInterface subquery, byte number) {
+	this.subquery = subquery;
+	this.number = number;
+    };
+
+    public SpanClassQueryWrapper (SpanQueryWrapperInterface subquery, short number) {
+	this.subquery = subquery;
+	this.number = (byte) number;
+    };
+
+    public SpanClassQueryWrapper (SpanQueryWrapperInterface subquery, int number) {
+	this.subquery = subquery;
+	this.number = (byte) number;
+    };
+
+    public SpanClassQueryWrapper (SpanQueryWrapperInterface subquery) {
+	this.subquery = subquery;
+	this.number = (byte) 0;
+    };
+
+    public SpanQuery toQuery () {
+	if (this.number == (byte) 0) {
+	    return new SpanClassQuery((SpanQuery) this.subquery.toQuery());
+	};
+	return new SpanClassQuery((SpanQuery) this.subquery.toQuery(), (byte) this.number);
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanElementQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanElementQueryWrapper.java
new file mode 100644
index 0000000..acf0638
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanElementQueryWrapper.java
@@ -0,0 +1,20 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import org.apache.lucene.search.spans.SpanQuery;
+
+import de.ids_mannheim.korap.query.SpanElementQuery;
+import de.ids_mannheim.korap.query.wrap.SpanQueryWrapperInterface;
+
+public class SpanElementQueryWrapper implements SpanQueryWrapperInterface {
+    String element;
+    String field;
+
+    public SpanElementQueryWrapper (String field, String element) {
+	this.field = field;
+	this.element = element;
+    };
+
+    public SpanQuery toQuery () {
+	return (SpanQuery) new SpanElementQuery(this.field, this.element);
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanMatchModifyQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanMatchModifyQueryWrapper.java
new file mode 100644
index 0000000..14bda8b
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanMatchModifyQueryWrapper.java
@@ -0,0 +1,38 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import org.apache.lucene.search.spans.SpanQuery;
+
+import de.ids_mannheim.korap.query.SpanMatchModifyQuery;
+import de.ids_mannheim.korap.query.wrap.SpanQueryWrapperInterface;
+
+import java.util.*;
+
+
+public class SpanMatchModifyQueryWrapper implements SpanQueryWrapperInterface {
+    private SpanQueryWrapperInterface subquery;
+    private byte number;
+
+    public SpanMatchModifyQueryWrapper (SpanQueryWrapperInterface subquery, byte number) {
+	this.subquery = subquery;
+	this.number = number;
+    };
+
+    public SpanMatchModifyQueryWrapper (SpanQueryWrapperInterface subquery, short number) {
+	this.subquery = subquery;
+	this.number = (byte) number;
+    };
+
+    public SpanMatchModifyQueryWrapper (SpanQueryWrapperInterface subquery, int number) {
+	this.subquery = subquery;
+	this.number = (byte) number;
+    };
+
+    public SpanMatchModifyQueryWrapper (SpanQueryWrapperInterface subquery) {
+	this.subquery = subquery;
+	this.number = (byte) 0;
+    };
+
+    public SpanQuery toQuery () {
+	return new SpanMatchModifyQuery(this.subquery.toQuery(), this.number);
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanQuantifierQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanQuantifierQueryWrapper.java
new file mode 100644
index 0000000..521cb0a
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanQuantifierQueryWrapper.java
@@ -0,0 +1,64 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import java.util.*;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+
+import de.ids_mannheim.korap.query.wrap.SpanQueryWrapperInterface;
+import org.apache.lucene.search.spans.SpanQuery;
+
+public class SpanQuantifierQueryWrapper implements SpanQueryWrapperInterface {
+    private String field;
+
+    public SpanQuantifierQueryWrapper (String field) {
+	this.field = field;
+    };
+
+    public SpanQuery toQuery () {
+	return (SpanQuery) null;
+    };
+
+
+    /*
+
+Only support spans with at least one occurrence and then
+flag those spans as NOT_NECESSARY.
+This unfortunately means the flag has to be supported at least in spanNextQuery.
+Problem: Queries without context:
+
+[]*[s:tree]? -> matches everything!
+
+The any segment is special; it should be supported by a special
+spanNextQuery that adds one position (or more) to the matching span:
+spanNext(Query1, ANY)
+
+      API idea:
+      opt();
+      star();
+      plus();
+      occ(2);
+      occ(2, this.UNLIMITED);
+      occ(0, 4);
+      occ(5, 8);
+
+      Implementation idea:
+      This query should work similarly to NextSpans, looking at all matching spans
+      in order per document and returning matching positions for all sequences within the boundary.
+      All actions should be translated to {x,y} boundaries.
+      ?     -> {0,1}
+      +     -> {1,UNL}
+      *     -> {0,UNL}
+      (2)   -> {2,2}
+      (,3)  -> {0,3}
+      (3,)  -> {3,UNL}
+      (3,4) -> {3,4}
+
+      oldSpanEnd = X;
+      for (i = 0; i < orderedSpans.length; i++) {
+          // ...
+      };
+
+    */
+};
+
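A minimal sketch of the boundary translation outlined in the comment above; the class name Bounds and the UNLIMITED constant are illustrative and not part of this commit:

    class Bounds {
        static final int UNLIMITED = Integer.MAX_VALUE;
        final int min, max;

        Bounds (int min, int max) {
            this.min = min;
            this.max = max;
        };

        static Bounds opt ()             { return new Bounds(0, 1); };         // ?     -> {0,1}
        static Bounds star ()            { return new Bounds(0, UNLIMITED); }; // *     -> {0,UNL}
        static Bounds plus ()            { return new Bounds(1, UNLIMITED); }; // +     -> {1,UNL}
        static Bounds occ (int n)        { return new Bounds(n, n); };         // (2)   -> {2,2}
        static Bounds occ (int m, int n) { return new Bounds(m, n); };         // (3,4) -> {3,4}
    };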
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanQueryWrapperInterface.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanQueryWrapperInterface.java
new file mode 100644
index 0000000..a4b1fcb
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanQueryWrapperInterface.java
@@ -0,0 +1,9 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import org.apache.lucene.search.spans.SpanQuery;
+
+// Todo: Make this an abstract class to deal with regexes in a parent abstract class!
+
+public interface SpanQueryWrapperInterface {
+    public SpanQuery toQuery ();
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanRegexQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanRegexQueryWrapper.java
new file mode 100644
index 0000000..8152a6f
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanRegexQueryWrapper.java
@@ -0,0 +1,41 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import org.apache.lucene.search.RegexpQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
+import org.apache.lucene.util.automaton.RegExp;
+import org.apache.lucene.index.Term;
+
+import java.util.*;
+
+public class SpanRegexQueryWrapper {
+    private SpanQuery query;
+
+    public SpanRegexQueryWrapper (String field, String re) {
+	this(field, re, RegExp.ALL, false);
+    };
+
+    public SpanRegexQueryWrapper (String field, String re, int flags) {
+	this(field, re, flags, false);
+    };
+
+    public SpanRegexQueryWrapper (String field, String re, boolean caseinsensitive) {
+	this(field, re, RegExp.ALL, caseinsensitive);
+    };
+
+    public SpanRegexQueryWrapper (String field, String re, int flags, boolean caseinsensitive) {
+	if (caseinsensitive) {
+	    if (re.startsWith("s:")) {
+		re = re.replaceFirst("s:", "i:");
+	    };
+	    // TODO: This may break things like \N
+	    re = re.toLowerCase();
+	};
+	RegexpQuery requery = new RegexpQuery(new Term(field, re), flags);
+	query = new SpanMultiTermQueryWrapper<RegexpQuery>( requery );
+    };
+
+    public SpanQuery toQuery() {
+	return this.query;
+    };
+};
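A short usage sketch for the regex wrapper above; the field name "tokens" and the "s:"/"i:" term prefixes are illustrative assumptions:

    // Case-sensitive: wraps a RegexpQuery on the term "s:Bau.*"
    SpanRegexQueryWrapper cs = new SpanRegexQueryWrapper("tokens", "s:Bau.*");

    // Case-insensitive: the "s:" prefix is rewritten to "i:" and the
    // pattern is lowercased, so the wrapped term becomes "i:bau.*"
    SpanRegexQueryWrapper ci = new SpanRegexQueryWrapper("tokens", "s:Bau.*", true);

    SpanQuery spanRegex = ci.toQuery();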
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanSegmentQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanSegmentQueryWrapper.java
new file mode 100644
index 0000000..b8d07bc
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanSegmentQueryWrapper.java
@@ -0,0 +1,168 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import java.util.*;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanNotQuery;
+import org.apache.lucene.search.spans.SpanOrQuery;
+import de.ids_mannheim.korap.query.wrap.SpanRegexQueryWrapper;
+
+/**
+ * @author Nils Diewald
+ * @version 0.01
+ *
+ * Creates a query object for segments, i.e. terms in a term vector
+ * sharing the same position. A SpanSegment can include simple string terms,
+ * regular expressions and alternatives. These elements can also be excluded.
+ */
+
+public class SpanSegmentQueryWrapper implements SpanQueryWrapperInterface {
+    public ArrayList<SpanQuery> inclusive;
+    public ArrayList<SpanQuery> exclusive;
+    private String field;
+
+    /**
+     * Constructor.
+     *
+     * @param field The field name
+     */
+    public SpanSegmentQueryWrapper (String field) {
+	this.field = field;
+	this.inclusive = new ArrayList<SpanQuery>();
+	this.exclusive = new ArrayList<SpanQuery>();
+    };
+
+    /**
+     * Constructor.
+     *
+     * @param field The field name
+     * @param terms An arbitrary number of terms
+     */
+    public SpanSegmentQueryWrapper (String field, String ... terms) {
+	this(field);
+	for (int i = 0; i < terms.length; i++) {
+	    this.inclusive.add((SpanQuery) new SpanTermQuery(new Term(field, terms[i])));
+	};
+    };
+
+    public SpanSegmentQueryWrapper (String field, SpanRegexQueryWrapper re) {
+	this(field);
+	this.inclusive.add((SpanQuery) re.toQuery());
+    };
+
+    public SpanSegmentQueryWrapper (String field, SpanAlterQueryWrapper alter) {
+	this(field);
+	this.inclusive.add((SpanQuery) alter.toQuery());
+    };
+
+    public SpanSegmentQueryWrapper (String field, SpanSegmentQueryWrapper ssq) {
+	this(field);
+
+	Iterator<SpanQuery> clause = ssq.inclusive.iterator();
+	while (clause.hasNext()) {
+	    this.inclusive.add( (SpanQuery) clause.next().clone() );
+	};
+
+	clause = ssq.exclusive.iterator();
+	while (clause.hasNext()) {
+	    this.exclusive.add( (SpanQuery) clause.next().clone() );
+	};
+    };
+
+    public SpanSegmentQueryWrapper with (String term) {
+	this.inclusive.add(new SpanTermQuery(new Term(field, term)));
+	return this;
+    };
+
+    public SpanSegmentQueryWrapper with (SpanRegexQueryWrapper re) {
+	this.inclusive.add((SpanQuery) re.toQuery());
+	return this;
+    };
+
+    public SpanSegmentQueryWrapper with (SpanAlterQueryWrapper alter) {
+	this.inclusive.add((SpanQuery) alter.toQuery());
+	return this;
+    };
+
+    public SpanSegmentQueryWrapper without (String term) {
+	this.exclusive.add(new SpanTermQuery(new Term(field, term)));
+	return this;
+    };
+
+    public SpanSegmentQueryWrapper without (SpanRegexQueryWrapper re) {
+	this.exclusive.add((SpanQuery) re.toQuery());
+	return this;
+    };
+
+    public SpanSegmentQueryWrapper without (SpanAlterQueryWrapper alter) {
+	this.exclusive.add((SpanQuery) alter.toQuery());
+	return this;
+    };
+
+    public SpanQuery toQuery () {
+	if (this.inclusive.size() + this.exclusive.size() == 0) {
+	    return null;
+	}
+	else if (this.inclusive.size() >= 1 && this.exclusive.size() >= 1) {
+	    return (SpanQuery) new SpanNotQuery(
+		this._listToQuery(this.inclusive),
+	        this._listToOrQuery(this.exclusive)
+            );
+	}
+
+	else if (this.inclusive.size() == 0 && this.exclusive.size() >= 1) {
+	    return (SpanQuery) new SpanNotQuery(
+		new SpanTermQuery(new Term(this.field, "T")),
+	        this._listToOrQuery(this.exclusive)
+            );
+	}
+
+	else if (this.inclusive.size() >= 1 && this.exclusive.size() == 0) {
+	    return (SpanQuery) this._listToQuery(this.inclusive);
+	};
+
+	return (SpanQuery) null;
+    };
+
+
+    private SpanQuery _listToQuery (ArrayList<SpanQuery> list) {
+	SpanQuery query = list.get(0);
+
+	for (int i = 1; i < list.size(); i++) {
+	    query = new SpanNearQuery(
+	        new SpanQuery[] {
+	            query,
+	            list.get(i)
+	        },
+	        -1,
+	        false,
+		false
+            );
+	};
+
+	return (SpanQuery) query;
+    };
+
+
+    private SpanQuery _listToOrQuery (ArrayList<SpanQuery> list) {
+	if (list.size() == 1) {
+	    return (SpanQuery) list.get(0);
+	};
+
+	Iterator<SpanQuery> clause = list.iterator();
+	SpanOrQuery soquery = new SpanOrQuery( clause.next() );
+	while (clause.hasNext()) {
+	    soquery.addClause( clause.next() );
+	};
+	return (SpanQuery) soquery;
+    };
+
+    public SpanSegmentQueryWrapper clone () {
+	return new SpanSegmentQueryWrapper(this.field, this);
+    };
+};
+
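A usage sketch for SpanSegmentQueryWrapper: all clauses refer to the same token position, with() adds required terms and without() adds excluded ones. Field and term names are illustrative only:

    SpanSegmentQueryWrapper seg = new SpanSegmentQueryWrapper("tokens", "s:Baum");
    seg.with(new SpanRegexQueryWrapper("tokens", "p:N.*"))
       .without("m:pl");
    SpanQuery segQuery = seg.toQuery();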
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanSequenceQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanSequenceQueryWrapper.java
new file mode 100644
index 0000000..977b556
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanSequenceQueryWrapper.java
@@ -0,0 +1,101 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import java.util.*;
+import de.ids_mannheim.korap.query.SpanNextQuery;
+import de.ids_mannheim.korap.query.wrap.SpanSegmentQueryWrapper;
+import de.ids_mannheim.korap.query.wrap.SpanRegexQueryWrapper;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import de.ids_mannheim.korap.query.wrap.SpanQueryWrapperInterface;
+
+/**
+ * @author Nils Diewald
+ */
+public class SpanSequenceQueryWrapper implements SpanQueryWrapperInterface {
+    private String field;
+    public ArrayList<SpanQuery> segments;
+
+    public SpanSequenceQueryWrapper (String field) {
+	this.field = field;
+	this.segments = new ArrayList<SpanQuery>();
+    };
+
+    public SpanSequenceQueryWrapper (String field, String ... terms) {
+	this(field);
+	for (int i = 0; i < terms.length; i++) {
+	    this.segments.add((SpanQuery) new SpanTermQuery(new Term(field, terms[i])));
+	};
+    };
+
+    public SpanSequenceQueryWrapper (String field, SpanQuery sq) {
+	this(field);
+	this.segments.add((SpanQuery) sq);
+    };
+
+    public SpanSequenceQueryWrapper (String field, SpanQueryWrapperInterface sswq) {
+	this(field);
+	this.segments.add((SpanQuery) sswq.toQuery());
+    };
+
+    public SpanSequenceQueryWrapper (String field, SpanRegexQueryWrapper re) {
+	this(field);
+	this.segments.add((SpanQuery) re.toQuery());
+    };
+
+    public SpanQuery get (int index) {
+	return this.segments.get(index);
+    };
+
+    public void set (int index, SpanQuery sq) {
+	this.segments.set(index, sq);
+    };
+
+    public SpanSequenceQueryWrapper append (String term) {
+	this.segments.add((SpanQuery) new SpanTermQuery(new Term(field, term)));
+	return this;
+    };
+
+    public SpanSequenceQueryWrapper append (SpanQueryWrapperInterface ssq) {
+	this.segments.add((SpanQuery) ssq.toQuery());
+	return this;
+    };
+
+    public SpanSequenceQueryWrapper append (SpanRegexQueryWrapper srqw) {
+	this.segments.add((SpanQuery) srqw.toQuery());
+	return this;
+    };
+
+    public SpanSequenceQueryWrapper prepend (String term) {
+	this.segments.add(0, (SpanQuery) new SpanTermQuery(new Term(field, term)));
+	return this;
+    };
+
+    public SpanSequenceQueryWrapper prepend (SpanSegmentQueryWrapper ssq) {
+	this.segments.add(0, (SpanQuery) ssq.toQuery());
+	return this;
+    };
+
+    public SpanSequenceQueryWrapper prepend (SpanRegexQueryWrapper re) {
+	this.segments.add(0, (SpanQuery) re.toQuery());
+	return this;
+    };
+
+    public SpanQuery toQuery () {
+	if (this.segments.size() == 0) {
+	    return (SpanQuery) null;
+	};
+
+	SpanQuery query = this.segments.get(0);
+
+	for (int i = 1; i < this.segments.size(); i++) {
+	    query = new SpanNextQuery(
+		query,
+	        this.segments.get(i),
+		false
+            );
+	};
+	return (SpanQuery) query;
+    };
+};
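A usage sketch for the sequence wrapper above; toQuery() folds the segments left to right into nested SpanNextQuery objects. Field and term names are illustrative only:

    SpanSequenceQueryWrapper seq = new SpanSequenceQueryWrapper("tokens", "s:der");
    seq.append("s:alte")
       .append(new SpanRegexQueryWrapper("tokens", "s:Baum(es)?"));
    // Roughly: spanNext(spanNext(s:der, s:alte), /s:Baum(es)?/)
    SpanQuery seqQuery = seq.toQuery();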
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanWithinQueryWrapper.java b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanWithinQueryWrapper.java
new file mode 100644
index 0000000..33d7386
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/query/wrap/SpanWithinQueryWrapper.java
@@ -0,0 +1,27 @@
+package de.ids_mannheim.korap.query.wrap;
+
+import de.ids_mannheim.korap.query.SpanWithinQuery;
+import de.ids_mannheim.korap.query.wrap.SpanSegmentQueryWrapper;
+import de.ids_mannheim.korap.query.wrap.SpanRegexQueryWrapper;
+import de.ids_mannheim.korap.query.wrap.SpanSequenceQueryWrapper;
+import de.ids_mannheim.korap.query.wrap.SpanQueryWrapperInterface;
+
+import java.util.*;
+
+import org.apache.lucene.search.spans.SpanQuery;
+
+
+
+public class SpanWithinQueryWrapper implements SpanQueryWrapperInterface {
+    private SpanQueryWrapperInterface element;
+    private SpanQueryWrapperInterface wrap;
+
+    public SpanWithinQueryWrapper (SpanQueryWrapperInterface element, SpanQueryWrapperInterface wrap) {
+	this.element = element;
+	this.wrap = wrap;
+    };
+
+    public SpanQuery toQuery () {
+	return new SpanWithinQuery(this.element.toQuery(), this.wrap.toQuery());
+    };
+};
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/util/KorapArray.java b/trunk/src/main/java/de/ids_mannheim/korap/util/KorapArray.java
new file mode 100644
index 0000000..62d1b8c
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/util/KorapArray.java
@@ -0,0 +1,52 @@
+package de.ids_mannheim.korap.util;
+
+import java.util.*;
+
+/**
+ * @author Nils Diewald
+ *
+ * A collection of array-specific utilities for the KorAP project.
+ */
+public class KorapArray {
+
+    /**
+     * Join a sequence of strings to a single string.
+     *
+     * @param separator String to separate joined segments
+     * @param strings Segments to join
+     */
+    public static String join (String separator, String ... strings) {
+	if (strings.length == 0)
+	    return "";
+
+	StringBuffer sb = new StringBuffer(strings[0]);
+
+	for (int i = 1; i < strings.length; i++) {
+	    sb.append(separator);
+	    sb.append(strings[i]);
+	};
+
+	return sb.toString();
+    };
+
+
+    /**
+     * Join a sequence of strings to a single string.
+     *
+     * @param separator Character to separate joined segments
+     * @param strings Segments to join
+     */
+    public static String join (char separator, String ... strings) {
+	if (strings.length == 0)
+	    return "";
+
+	StringBuffer sb = new StringBuffer(strings[0]);
+
+	for (int i = 1; i < strings.length; i++) {
+	    sb.append(separator);
+	    sb.append(strings[i]);
+	};
+
+	return sb.toString();
+    };
+};
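For illustration, join() behaves like a conventional string join:

    KorapArray.join(", ", "der", "alte", "Baum");  // "der, alte, Baum"
    KorapArray.join('/', "a", "b", "c");           // "a/b/c"
    KorapArray.join("-");                          // ""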
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/util/KorapByte.java b/trunk/src/main/java/de/ids_mannheim/korap/util/KorapByte.java
new file mode 100644
index 0000000..9115ba7
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/util/KorapByte.java
@@ -0,0 +1,61 @@
+package de.ids_mannheim.korap.util;
+
+import java.util.*;
+
+// Maybe wrong! TEST!
+
+/**
+ * @author Nils Diewald
+ *
+ * A collection of methods to deal with bytes and byte arrays.
+ */
+public class KorapByte {
+
+    /**
+     * Convert an integer to a byte array.
+     *
+     * @param number The number to convert.
+     */
+    // Based on http://www.tutorials.de/java/228129-konvertierung-von-integer-byte-array.html
+    public static byte[] int2byte (int number) {
+	byte[] data = new byte[4];
+	for (int i = 0; i < 4; ++i) {
+	    int shift = i << 3; // i * 8
+	    data[3-i] = (byte)((number & (0xff << shift)) >>> shift);
+	};
+	return data;
+    };
+
+    /**
+     * Convert a byte array to an integer.
+     *
+     * @param data The byte array to convert.
+     * @param offset The offset in the array, measured in integers.
+     */
+    // Based on http://www.tutorials.de/java/228129-konvertierung-von-integer-byte-array.html
+    public static int byte2int (byte[] data, int offset) {
+	// offset is measured in integers, i.e. in steps of 4 bytes
+	int number = 0;
+	int start = offset * 4;
+	for (int i = 0; i < 4; ++i) {
+	    number |= (data[start + 3 - i] & 0xff) << (i << 3);
+	};
+	return number;
+    };
+
+    public static int byte2int (byte[] data) {
+	return byte2int(data, 0);
+    };
+
+    /*
+    public static short byte2short (byte[] data, int offset) {
+	short number = 0;
+	number |= (data[3-offset] & 0xff) << (offset << 3);
+	offset--;
+	number |= (data[3-offset] & 0xff) << (offset << 3);
+	return number;
+    };
+
+    public static short byte2short (byte[] data) {
+	return byte2short(data, 0);
+    };
+    */
+};
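A round-trip sketch for the helpers above (big-endian layout, as written by int2byte):

    byte[] data = KorapByte.int2byte(20050610);
    int number  = KorapByte.byte2int(data);  // 20050610; data[0] holds the most significant byte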
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/util/KorapDate.java b/trunk/src/main/java/de/ids_mannheim/korap/util/KorapDate.java
new file mode 100644
index 0000000..bc3d717
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/util/KorapDate.java
@@ -0,0 +1,166 @@
+package de.ids_mannheim.korap.util;
+
+import java.util.*;
+import java.util.regex.*;
+
+/**
+ * @author Nils Diewald
+ *
+ * KorapDate is a helper object to parse and stringify date strings
+ * for use in integer range queries.
+ */
+public class KorapDate {
+    /*
+    protected char[] year  = new char[4];
+    protected char[] month = new char[2];
+    protected char[] day   = new char[2];
+    */
+
+    private int year = 0, month = 0, day = 0;
+
+    private static final Pattern datePattern = Pattern.compile(
+	"(\\d\\d\\d\\d)" +
+        "(?:[-/]?(\\d\\d)" +
+        "(?:[-/]?(\\d\\d))?)?"
+    );
+
+    public static int END = 99_999_999;
+    public static int BEGINNING = 0;
+
+    public KorapDate (String dateStr) {
+	if (dateStr == null || dateStr.isEmpty())
+	    return;
+
+	Matcher m = datePattern.matcher(dateStr);
+	if (m.matches()) {
+	    this.year = Integer.parseInt(m.group(1));
+	    if (m.group(2) != null) {
+		this.month = Integer.parseInt(m.group(2));
+		if (m.group(3) != null)
+		    this.day = Integer.parseInt(m.group(3));
+	    };
+	}
+	else {
+	    return;
+	};
+    };
+
+    private static int ceil (short padding, int nr) {
+	if (nr == 0) {
+	    if (padding == (short) 4) {
+		return 9999;
+	    }
+	    else if (padding == (short) 2) {
+		return 99;
+	    };
+	};
+	return nr;
+    };
+
+    // Make yyyy???? become yyyy9999 and yyyymm?? become yyyymm99
+    public int ceil () {
+	return
+	    (ceil((short) 4, this.year) * 10_000) +
+	    (ceil((short) 2, this.month) * 100) +
+	    (ceil((short) 2, this.day));
+    };
+
+    // Make yyyy???? become yyyy0000 and yyyymm?? become yyyymm00
+    public int floor () {
+	int floor = 0;
+	if (this.year == 0) {
+	    return 0;
+	}
+	else {
+	    floor = this.year * 10_000;
+	};
+	if (this.month == 0) {
+	    return floor;
+	}
+	else {
+	    floor += this.month * 100;
+	};
+	if (this.day == 0) {
+	    return floor;
+	};
+	return (floor + this.day);
+    };
+
+
+    public int year () {
+	return this.year;
+    };
+
+    public int month () {
+	return this.month;
+    };
+
+    public int day () {
+	return this.day;
+    };
+
+
+    public String toString() {
+	StringBuilder sb = this.toStringBuilder();
+	if (sb.length() < 4)
+	    return null;
+
+	if (sb.length() < 8) {
+	    sb.append("00");
+	    if (sb.length() < 6) {
+		sb.append("00");
+	    };
+	};
+
+	return sb.toString();
+    };
+
+    public String toDisplay() {
+	StringBuilder sb = this.toStringBuilder();
+	if (sb.length() == 8)
+	    sb.insert(6, '-');
+
+	if (sb.length() > 4)
+	    sb.insert(4, '-');
+
+	return sb.toString();
+    };
+
+    public String toCeilString() {
+	StringBuilder sb = new StringBuilder();
+	return sb.append(this.ceil()).toString();
+    };
+
+    public String toFloorString() {
+	StringBuilder sb = new StringBuilder();
+	return sb.append(this.floor()).toString();
+    };
+
+    // Format date as yyyymmdd
+    private StringBuilder toStringBuilder () {
+	StringBuilder sb = new StringBuilder();
+	if (this.year != 0) {
+
+	    // Append year
+	    if (this.year < 100)
+		sb.append("20");
+
+	    sb.append(this.year);
+	    
+	    if (this.month != 0) {
+
+		// Append month
+		if (this.month < 10)
+		    sb.append('0');
+		sb.append(this.month);
+
+		if (this.day != 0) {
+		    // Append day
+		    if (this.day < 10)
+			sb.append('0');
+		    sb.append(this.day);
+		};
+	    };
+	};
+	return sb;
+    };
+};
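A usage sketch for KorapDate; floor() and ceil() yield the integer bounds used for range queries:

    KorapDate full = new KorapDate("2005-06-10");
    full.floor();       // 20050610
    full.ceil();        // 20050610
    full.toDisplay();   // "2005-06-10"

    KorapDate partial = new KorapDate("2005-06");
    partial.floor();    // 20050600
    partial.ceil();     // 20050699
    partial.toString(); // "20050600"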
diff --git a/trunk/src/main/java/de/ids_mannheim/korap/util/KorapHTML.java b/trunk/src/main/java/de/ids_mannheim/korap/util/KorapHTML.java
new file mode 100644
index 0000000..cadbb11
--- /dev/null
+++ b/trunk/src/main/java/de/ids_mannheim/korap/util/KorapHTML.java
@@ -0,0 +1,22 @@
+package de.ids_mannheim.korap.util;
+
+/**
+ * @author Nils Diewald
+ *
+ * A collection of methods to deal with HTML encoding.
+ */
+public class KorapHTML {
+
+    /**
+     * Encode a string so it can be safely embedded in HTML.
+     *
+     * @param text The string to encode.
+     */
+    public static String encodeHTML (String text) {
+	return
+	    text.replace("&", "&amp;")
+	    .replace("<", "&lt;")
+	    .replace(">", "&gt;")
+	    .replace("\"", "&quot;");
+    };
+};
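For illustration:

    KorapHTML.encodeHTML("Bäume & \"Sträucher\" <b>");
    // => "Bäume &amp; &quot;Sträucher&quot; &lt;b&gt;"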
diff --git a/trunk/src/main/resources/log4j.properties b/trunk/src/main/resources/log4j.properties
new file mode 100644
index 0000000..3a2747e
--- /dev/null
+++ b/trunk/src/main/resources/log4j.properties
@@ -0,0 +1,21 @@
+## Logger configuration file for log4j
+
+log4j.rootLogger = DEBUG, stdout
+
+#log4j.logger.de.ids_mannheim.korap.query.spans.ElementSpans = TRACE, stdout
+# log4j.logger.de.ids_mannheim.korap.query.spans.WithinSpans = TRACE, stdout
+#log4j.logger.de.ids_mannheim.korap.query.SpanNextQuery = TRACE, stdout
+# log4j.logger.de.ids_mannheim.korap.query.spans.NextSpans = TRACE, stdout
+#log4j.logger.de.ids_mannheim.korap.query.spans.KorapTermSpan = TRACE, stdout
+#log4j.logger.de.ids_mannheim.korap.query.spans.ClassSpans = TRACE, stdout
+#log4j.logger.de.ids_mannheim.korap.query.spans.MatchSpans = TRACE, stdout
+#log4j.logger.de.ids_mannheim.korap.KorapIndex = TRACE, stdout
+#log4j.logger.de.ids_mannheim.korap.KorapMatch = TRACE, stdout
+
+# log4j.logger.de.ids_mannheim.korap.analysis.MultiTermTokenStream = TRACE, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern = %5p (%F:%L) -> %m%n
+
+# log4j.appender.stdout.Target=System.out
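A minimal sketch of how a class obtains a logger driven by this configuration, assuming plain log4j 1.x on the classpath; the class name is illustrative:

    import org.apache.log4j.Logger;

    public class LoggingDemo {
        private final static Logger log = Logger.getLogger(LoggingDemo.class);

        public static void main (String[] args) {
            log.debug("Emitted: DEBUG is the configured root level");
            log.trace("Suppressed: TRACE is below the DEBUG root level");
        };
    };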