This is an automated email from the ASF dual-hosted git repository.

sergeykamov pushed a commit to branch NLPCRAFT-485
in repository https://gitbox.apache.org/repos/asf/incubator-nlpcraft.git

commit 462bc71a688ac05da144f27077eebd4cf4a035ee
Author: Sergey Kamov <[email protected]>
AuthorDate: Tue Mar 1 14:00:05 2022 +0300

    Lemma and POS removed from NCToken and added as token's properties.
---
 .../examples/lightswitch/LightSwitchRuModel.scala  |  7 ++-
 ...icher.scala => NCRuLemmaPosTokenEnricher.scala} | 44 ++++++++-----
 .../enricher/NCRuStopWordsTokenEnricher.scala      |  7 ++-
 .../nlp/token/parser/NCRuTokenParser.scala         | 49 ++++-----------
 .../stanford/NCStanfordLemmaPosTokenEnricher.java  | 50 ++++-----------
 .../impl/NCStanfordLemmaPosTokenEnricherImpl.scala | 18 ++----
 .../impl/NCStanfordNLPTokenParserImpl.scala        |  6 +-
 .../stanford/NCStanfordNLPTokenParserSpec.scala    |  5 +-
 .../main/scala/org/apache/nlpcraft/NCToken.java    | 12 ----
 .../internal/impl/NCModelPipelineManager.scala     |  4 +-
 .../apache/nlpcraft/nlp/NCENDefaultPipeline.java   | 16 ++---
 .../nlpcraft/nlp/NCENSemanticEntityParser.java     |  6 +-
 .../parser/nlp/impl/NCNLPEntityParserImpl.scala    |  2 -
 .../en/NCLemmaPosTokenEnricher.java}               | 43 +++++++------
 .../en/impl/NCDictionaryTokenEnricherImpl.scala    |  4 +-
 .../en/impl/NCLemmaPosTokenEnricherImpl.scala}     | 53 ++++++----------
 .../en/impl/NCQuotesTokenEnricherImpl.scala        |  3 +-
 .../en/impl/NCStopWordsTokenEnricherImpl.scala     | 22 ++++---
 .../token/parser/opennlp/NCOpenNLPTokenParser.java |  6 +-
 .../opennlp/impl/NCOpenNLPTokenParserImpl.scala    | 73 ++++------------------
 .../opennlp/NCEnOpenNlpTokenParserBenchmark.java   |  6 +-
 .../semantic/NCSemanticEntityParserSpec.scala      | 13 +++-
 .../en/NCDictionaryTokenEnricherSpec.scala         | 18 +++++-
 .../enricher/en/NCQuotesTokenEnricherSpec.scala    | 16 ++++-
 .../enricher/en/NCStopWordsEnricherSpec.scala      | 17 ++++-
 .../parser/opennlp/NCOpenNLPTokenParserSpec.scala  | 57 ++++-------------
 .../org/apache/nlpcraft/nlp/util/NCTestToken.scala |  5 +-
 .../org/apache/nlpcraft/nlp/util/NCTestUtils.scala |  4 +-
 .../nlp/util/opennlp/NCTestConfigJava.java         |  6 +-
 29 files changed, 228 insertions(+), 344 deletions(-)

diff --git 
a/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/LightSwitchRuModel.scala
 
b/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/LightSwitchRuModel.scala
index 822e98c..10e02b2 100644
--- 
a/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/LightSwitchRuModel.scala
+++ 
b/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/LightSwitchRuModel.scala
@@ -19,7 +19,7 @@ package org.apache.nlpcraft.examples.lightswitch
 
 import org.apache.nlpcraft.*
 import 
org.apache.nlpcraft.examples.lightswitch.nlp.entity.parser.semantic.NCRuSemanticEntityParser
-import 
org.apache.nlpcraft.examples.lightswitch.nlp.token.enricher.NCRuStopWordsTokenEnricher
+import 
org.apache.nlpcraft.examples.lightswitch.nlp.token.enricher.{NCRuLemmaPosTokenEnricher,
 NCRuStopWordsTokenEnricher}
 import 
org.apache.nlpcraft.examples.lightswitch.nlp.token.parser.NCRuTokenParser
 import org.apache.nlpcraft.nlp.entity.parser.nlp.NCNLPEntityParser
 import org.apache.nlpcraft.nlp.entity.parser.semantic.NCSemanticEntityParser
@@ -47,7 +47,10 @@ class LightSwitchRuModel extends NCModelAdapter(
     new NCModelConfig("nlpcraft.lightswitch.ru.ex", "LightSwitch Example Model 
RU", "1.0"),
     new NCModelPipeline:
         override val getTokenParser: NCTokenParser = new NCRuTokenParser()
-        override val getTokenEnrichers: util.List[NCTokenEnricher] = Seq(new 
NCRuStopWordsTokenEnricher()).asJava
+        override val getTokenEnrichers: util.List[NCTokenEnricher] = Seq(
+            new NCRuLemmaPosTokenEnricher(),
+            new NCRuStopWordsTokenEnricher()
+        ).asJava
         override val getEntityParsers: util.List[NCEntityParser] = Seq(new 
NCRuSemanticEntityParser("lightswitch_model_ru.yaml")).asJava
 ):
     /**
diff --git 
a/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuStopWordsTokenEnricher.scala
 
b/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuLemmaPosTokenEnricher.scala
similarity index 50%
copy from 
nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuStopWordsTokenEnricher.scala
copy to 
nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuLemmaPosTokenEnricher.scala
index aaebce9..1e6d15c 100644
--- 
a/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuStopWordsTokenEnricher.scala
+++ 
b/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuLemmaPosTokenEnricher.scala
@@ -19,26 +19,38 @@ package 
org.apache.nlpcraft.examples.lightswitch.nlp.token.enricher
 
 import org.apache.lucene.analysis.ru.RussianAnalyzer
 import org.apache.nlpcraft.*
+import org.languagetool.AnalyzedToken
+import org.languagetool.tagging.ru.RussianTagger
 
 import java.util
+import java.util.stream.Collectors
 import scala.jdk.CollectionConverters.*
 
 /**
   *
   */
-class NCRuStopWordsTokenEnricher extends NCTokenEnricher:
-    private final val stops = RussianAnalyzer.getDefaultStopSet
-
-    override def enrich(req: NCRequest, cfg: NCModelConfig, toks: 
util.List[NCToken]): Unit =
-        for (t <- toks.asScala)
-            val lemma = t.getLemma
-            lazy val pos = t.getPos
-
-            t.put(
-                "stopword",
-                lemma.length == 1 && !Character.isLetter(lemma.head) && 
!Character.isDigit(lemma.head) ||
-                stops.contains(lemma.toLowerCase) ||
-                pos.startsWith("PARTICLE") ||
-                pos.startsWith("INTERJECTION") ||
-                pos.startsWith("PREP")
-            )
\ No newline at end of file
+class NCRuLemmaPosTokenEnricher extends NCTokenEnricher:
+    private def nvl(v: String, dflt : => String): String = if v != null then v 
else dflt
+
+    override def enrich(req: NCRequest, cfg: NCModelConfig, toksList: 
util.List[NCToken]): Unit =
+        val toks = toksList.asScala
+        val tags = 
RussianTagger.INSTANCE.tag(toks.map(_.getText).asJava).asScala
+
+        require(toks.size == tags.size)
+
+        toks.zip(tags).foreach { case (tok, tag) =>
+            val readings = tag.getReadings.asScala
+
+            val (lemma, pos) = readings.size match
+                // No data. Lemma is word as is, POS is undefined.
+                case 0 => (tok.getText, "")
+                // Takes first. Other variants ignored.
+                case _ =>
+                    val aTok: AnalyzedToken = readings.head
+                    (nvl(aTok.getLemma, tok.getText), nvl(aTok.getPOSTag, ""))
+
+            tok.put("pos", pos)
+            tok.put("lemma", lemma)
+
+            () // Otherwise NPE.
+        }
diff --git 
a/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuStopWordsTokenEnricher.scala
 
b/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuStopWordsTokenEnricher.scala
index aaebce9..e675ed4 100644
--- 
a/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuStopWordsTokenEnricher.scala
+++ 
b/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/enricher/NCRuStopWordsTokenEnricher.scala
@@ -29,10 +29,13 @@ import scala.jdk.CollectionConverters.*
 class NCRuStopWordsTokenEnricher extends NCTokenEnricher:
     private final val stops = RussianAnalyzer.getDefaultStopSet
 
+    private def getPos(t: NCToken): String = t.getOpt("pos").orElseThrow(() => 
throw new NCException("POS not found in token."))
+    private def getLemma(t: NCToken): String = 
t.getOpt("lemma").orElseThrow(() => throw new NCException("Lemma not found in 
token."))
+
     override def enrich(req: NCRequest, cfg: NCModelConfig, toks: 
util.List[NCToken]): Unit =
         for (t <- toks.asScala)
-            val lemma = t.getLemma
-            lazy val pos = t.getPos
+            val lemma = getLemma(t)
+            lazy val pos = getPos(t)
 
             t.put(
                 "stopword",
diff --git 
a/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/parser/NCRuTokenParser.scala
 
b/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/parser/NCRuTokenParser.scala
index 73e4b33..8dda52d 100644
--- 
a/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/parser/NCRuTokenParser.scala
+++ 
b/nlpcraft-examples/lightswitch-ru/src/main/java/org/apache/nlpcraft/examples/lightswitch/nlp/token/parser/NCRuTokenParser.scala
@@ -28,46 +28,23 @@ import org.languagetool.tokenizers.WordTokenizer
 import java.util
 import scala.jdk.CollectionConverters.*
 
-object NCRuTokenParser:
+/**
+  *
+  */
+class NCRuTokenParser extends NCTokenParser:
     private val tokenizer = new WordTokenizer
-    private case class Span(word: String, start: Int, end: Int)
-    private def nvl(v: String, dflt : => String): String = if v != null then v 
else dflt
 
-    private def split(text: String): Seq[Span] =
-        val spans = collection.mutable.ArrayBuffer.empty[Span]
+    override def tokenize(text: String): util.List[NCToken] =
+        val toks = collection.mutable.ArrayBuffer.empty[NCToken]
         var sumLen = 0
 
         for (((word, len), idx) <- tokenizer.tokenize(text).asScala.map(p => p 
-> p.length).zipWithIndex)
-            if word.strip.nonEmpty then spans += Span(word, sumLen, sumLen + 
word.length)
-            sumLen += word.length
+            if word.strip.nonEmpty then toks += new NCPropertyMapAdapter with 
NCToken:
+                override def getText: String = word
+                override def getIndex: Int = idx
+                override def getStartCharIndex: Int = sumLen
+                override def getEndCharIndex: Int = sumLen + word.length
 
-        spans.toSeq
-
-import NCRuTokenParser.*
-
-class NCRuTokenParser extends NCTokenParser:
-    override def tokenize(text: String): util.List[NCToken] =
-        val spans = split(text)
-        val tags = RussianTagger.INSTANCE.tag(spans.map(_.word).asJava).asScala
-
-        require(spans.size == tags.size)
-
-        spans.zip(tags).zipWithIndex.map { case ((span, tag), idx) =>
-            val readings = tag.getReadings.asScala
-
-            val (lemma, pos) = readings.size match
-                // No data. Lemma is word as is, POS is undefined.
-                case 0 => (span.word, "")
-                // Takes first. Other variants ignored.
-                case _ =>
-                    val aTok: AnalyzedToken = readings.head
-                    (nvl(aTok.getLemma, span.word), nvl(aTok.getPOSTag, ""))
+            sumLen += word.length
 
-            new NCPropertyMapAdapter with NCToken:
-                override val getText: String = span.word
-                override val getIndex: Int = idx
-                override val getStartCharIndex: Int = span.start
-                override val getEndCharIndex: Int = span.end
-                override val getLemma: String = lemma
-                override val getPos: String = pos
-        }.asJava
\ No newline at end of file
+        toks.asJava
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/NCToken.java 
b/nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/enricher/stanford/NCStanfordLemmaPosTokenEnricher.java
similarity index 64%
copy from nlpcraft/src/main/scala/org/apache/nlpcraft/NCToken.java
copy to 
nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/enricher/stanford/NCStanfordLemmaPosTokenEnricher.java
index 2d6c87d..2f003dc 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/NCToken.java
+++ 
b/nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/enricher/stanford/NCStanfordLemmaPosTokenEnricher.java
@@ -15,45 +15,21 @@
  * limitations under the License.
  */
 
-package org.apache.nlpcraft;
+package org.apache.nlpcraft.nlp.token.enricher.stanford;
+
+import org.apache.nlpcraft.NCModelConfig;
+import org.apache.nlpcraft.NCRequest;
+import org.apache.nlpcraft.NCToken;
+import org.apache.nlpcraft.NCTokenEnricher;
+
+import java.util.List;
 
 /**
  *
  */
-public interface NCToken extends NCPropertyMap {
-    /**
-     *
-     * @return
-     */
-    String getText();
-
-    /**
-     *
-     * @return
-     */
-    int getIndex();
-
-    /**
-     *
-     * @return
-     */
-    int getStartCharIndex();
-
-    /**
-     *
-     * @return
-     */
-    int getEndCharIndex();
-
-    /**
-     *
-     * @return
-     */
-    String getLemma();
-
-    /**
-     *
-     * @return
-     */
-    String getPos();
+public class NCStanfordLemmaPosTokenEnricher implements NCTokenEnricher {
+    @Override
+    public void enrich(NCRequest req, NCModelConfig cfg, List<NCToken> toks) {
+        // TODO:
+    }
 }
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCDictionaryTokenEnricherImpl.scala
 
b/nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/enricher/stanford/impl/NCStanfordLemmaPosTokenEnricherImpl.scala
similarity index 66%
copy from 
nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCDictionaryTokenEnricherImpl.scala
copy to 
nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/enricher/stanford/impl/NCStanfordLemmaPosTokenEnricherImpl.scala
index 59c1847..147cf1b 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCDictionaryTokenEnricherImpl.scala
+++ 
b/nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/enricher/stanford/impl/NCStanfordLemmaPosTokenEnricherImpl.scala
@@ -15,21 +15,13 @@
  * limitations under the License.
  */
 
-package org.apache.nlpcraft.nlp.token.enricher.en.impl
+package org.apache.nlpcraft.nlp.token.enricher.stanford.impl
 
 import org.apache.nlpcraft.*
-import org.apache.nlpcraft.internal.util.NCUtils
 
-import java.util.List as JList
+import java.util
 
-/**
-  *
-  */
-class NCDictionaryTokenEnricherImpl extends NCTokenEnricher:
-    private var dict: Set[String] = _
+class NCStanfordLemmaPosTokenEnricherImpl extends NCTokenEnricher:
+    // TODO:
 
-    init()
-
-    private def init(): Unit = dict = 
NCUtils.readResource("moby/354984si.ngl", "iso-8859-1").toSet
-    override def enrich(req: NCRequest, cfg: NCModelConfig, toks: 
JList[NCToken]): Unit =
-        toks.forEach(t => t.put("dict", dict.contains(t.getLemma)))
+    override def enrich(req: NCRequest, cfg: NCModelConfig, toks: 
util.List[NCToken]): Unit = ???
diff --git 
a/nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/parser/stanford/impl/NCStanfordNLPTokenParserImpl.scala
 
b/nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/parser/stanford/impl/NCStanfordNLPTokenParserImpl.scala
index 15152e8..c8baa05 100644
--- 
a/nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/parser/stanford/impl/NCStanfordNLPTokenParserImpl.scala
+++ 
b/nlpcraft-stanford/src/main/java/org/apache/nlpcraft/nlp/token/parser/stanford/impl/NCStanfordNLPTokenParserImpl.scala
@@ -49,10 +49,12 @@ class NCStanfordNLPTokenParserImpl(stanford: 
StanfordCoreNLP) extends NCTokenPar
             zipWithIndex.map { (t, idx) =>
                 val txt = t.originalText()
 
+                // TODO:
                 new NCPropertyMapAdapter with NCToken:
                     override val getText: String = txt
-                    override val getLemma: String = nvl(t.lemma(), txt)
-                    override val getPos: String = nvl(t.tag(), "")
+                    // TODO: move it into special component?
+//                    override val getLemma: String = nvl(t.lemma(), txt)
+//                    override val getPos: String = nvl(t.tag(), "")
                     override val getIndex: Int = idx
                     override val getStartCharIndex: Int = t.beginPosition()
                     override val getEndCharIndex: Int = t.endPosition()
diff --git 
a/nlpcraft-stanford/src/test/scala/org/apache/nlpcraft/nlp/token/parser/stanford/NCStanfordNLPTokenParserSpec.scala
 
b/nlpcraft-stanford/src/test/scala/org/apache/nlpcraft/nlp/token/parser/stanford/NCStanfordNLPTokenParserSpec.scala
index 7adb176..d2ebfd3 100644
--- 
a/nlpcraft-stanford/src/test/scala/org/apache/nlpcraft/nlp/token/parser/stanford/NCStanfordNLPTokenParserSpec.scala
+++ 
b/nlpcraft-stanford/src/test/scala/org/apache/nlpcraft/nlp/token/parser/stanford/NCStanfordNLPTokenParserSpec.scala
@@ -38,5 +38,6 @@ class NCStanfordNLPTokenParserSpec:
         NCTestUtils.printTokens(toks)
 
         val words = toks.map(_.getText)
-        require(toks.map(_.getPos).distinct.sizeIs > 1)
-        require(toks.map(_.getLemma).zip(words).exists {_ != _})
+        // TODO: fix after main code fix.
+//        require(toks.map(_.getPos).distinct.sizeIs > 1)
+//        require(toks.map(_.getLemma).zip(words).exists {_ != _})
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/NCToken.java 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/NCToken.java
index 2d6c87d..9cb6e68 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/NCToken.java
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/NCToken.java
@@ -44,16 +44,4 @@ public interface NCToken extends NCPropertyMap {
      * @return
      */
     int getEndCharIndex();
-
-    /**
-     *
-     * @return
-     */
-    String getLemma();
-
-    /**
-     *
-     * @return
-     */
-    String getPos();
 }
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/internal/impl/NCModelPipelineManager.scala
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/internal/impl/NCModelPipelineManager.scala
index afcd63a..1f3849e 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/internal/impl/NCModelPipelineManager.scala
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/internal/impl/NCModelPipelineManager.scala
@@ -128,13 +128,11 @@ class NCModelPipelineManager(cfg: NCModelConfig, 
pipeline: NCModelPipeline) exte
                 check()
                 e.enrich(req, cfg, toks)
 
-        val tbl = NCAsciiTable("Text", "Lemma", "POS", "Start index", "End 
index", "Properties")
+        val tbl = NCAsciiTable("Text", "Start index", "End index", 
"Properties")
 
         for (t <- toks.asScala)
             tbl += (
                 t.getText,
-                t.getLemma,
-                t.getPos,
                 t.getStartCharIndex,
                 t.getEndCharIndex,
                 mkProps(t)
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/NCENDefaultPipeline.java 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/NCENDefaultPipeline.java
index 244cac5..837a80f 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/NCENDefaultPipeline.java
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/NCENDefaultPipeline.java
@@ -22,11 +22,7 @@ import org.apache.nlpcraft.NCModelPipeline;
 import org.apache.nlpcraft.NCTokenEnricher;
 import org.apache.nlpcraft.NCTokenParser;
 import org.apache.nlpcraft.internal.util.NCResourceReader;
-import org.apache.nlpcraft.nlp.token.enricher.en.NCBracketsTokenEnricher;
-import org.apache.nlpcraft.nlp.token.enricher.en.NCDictionaryTokenEnricher;
-import org.apache.nlpcraft.nlp.token.enricher.en.NCQuotesTokenEnricher;
-import org.apache.nlpcraft.nlp.token.enricher.en.NCStopWordsTokenEnricher;
-import org.apache.nlpcraft.nlp.token.enricher.en.NСSwearWordsTokenEnricher;
+import org.apache.nlpcraft.nlp.token.enricher.en.*;
 import org.apache.nlpcraft.nlp.token.parser.opennlp.NCOpenNLPTokenParser;
 
 import java.util.Arrays;
@@ -40,13 +36,13 @@ import java.util.List;
 public class NCENDefaultPipeline implements NCModelPipeline {
     private static final NCResourceReader reader = new NCResourceReader();
 
-    private final NCTokenParser tokParser = new NCOpenNLPTokenParser(
-        reader.getPath("opennlp/en-token.bin"),
-        reader.getPath("opennlp/en-pos-maxent.bin"),
-        reader.getPath("opennlp/en-lemmatizer.dict")
-    );
+    private final NCTokenParser tokParser = new 
NCOpenNLPTokenParser(reader.getPath("opennlp/en-token.bin"));
 
     private List<NCTokenEnricher> tokenEnrichers = Arrays.asList(
+        new NCLemmaPosTokenEnricher(
+            reader.getPath("opennlp/en-pos-maxent.bin"),
+            reader.getPath("opennlp/en-lemmatizer.dict")
+        ),
         new NCStopWordsTokenEnricher(),
         new 
NСSwearWordsTokenEnricher(reader.getPath("badfilter/swear_words.txt")),
         new NCQuotesTokenEnricher(),
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/NCENSemanticEntityParser.java 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/NCENSemanticEntityParser.java
index 7cf3d55..08362a0 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/NCENSemanticEntityParser.java
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/NCENSemanticEntityParser.java
@@ -39,11 +39,7 @@ public class NCENSemanticEntityParser extends 
NCSemanticEntityParser {
     }
 
     private static NCOpenNLPTokenParser mkParser() {
-        return new NCOpenNLPTokenParser(
-            reader.getPath("opennlp/en-token.bin"),
-            reader.getPath("opennlp/en-pos-maxent.bin"),
-            reader.getPath("opennlp/en-lemmatizer.dict")
-        );
+        return new 
NCOpenNLPTokenParser(reader.getPath("opennlp/en-token.bin"));
     }
 
     /**
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/entity/parser/nlp/impl/NCNLPEntityParserImpl.scala
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/entity/parser/nlp/impl/NCNLPEntityParserImpl.scala
index 5c55f69..db0941f 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/entity/parser/nlp/impl/NCNLPEntityParserImpl.scala
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/entity/parser/nlp/impl/NCNLPEntityParserImpl.scala
@@ -38,8 +38,6 @@ class NCNLPEntityParserImpl extends NCEntityParser:
     override def parse(req: NCRequest, cfg: NCModelConfig, toks: 
JList[NCToken]): JList[NCEntity] =
         toks.stream().map(t =>
             new NCPropertyMapAdapter with NCEntity:
-                put(s"$id:lemma", t.getLemma)
-                put(s"$id:pos", t.getPos)
                 put(s"$id:text", t.getText)
                 put(s"$id:index", t.getIndex)
                 put(s"$id:startCharIndex", t.getStartCharIndex)
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParser.java
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCLemmaPosTokenEnricher.java
similarity index 52%
copy from 
nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParser.java
copy to 
nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCLemmaPosTokenEnricher.java
index 629c8aa..81e3a73 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParser.java
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCLemmaPosTokenEnricher.java
@@ -15,42 +15,47 @@
  * limitations under the License.
  */
 
-package org.apache.nlpcraft.nlp.token.parser.opennlp;
+package org.apache.nlpcraft.nlp.token.enricher.en;
 
-import org.apache.nlpcraft.NCException;
+import org.apache.nlpcraft.NCModelConfig;
+import org.apache.nlpcraft.NCRequest;
 import org.apache.nlpcraft.NCToken;
-import org.apache.nlpcraft.NCTokenParser;
-import 
org.apache.nlpcraft.nlp.token.parser.opennlp.impl.NCOpenNLPTokenParserImpl;
+import org.apache.nlpcraft.NCTokenEnricher;
+import 
org.apache.nlpcraft.nlp.token.enricher.en.impl.NCLemmaPosTokenEnricherImpl;
 
 import java.util.List;
-import java.util.Objects;
+import java.util.Set;
 
-/*
+/**
+ * TODO: enriches with <code>lemma</code> and <code>pos</code> properties.
  *
  * Models can be downloaded from the following resources:
- *  - tokenizer: http://opennlp.sourceforge.net/models-1.5/en-token.bin
  *  - tagger: http://opennlp.sourceforge.net/models-1.5/en-pos-maxent.bin
  *  - lemmatizer: 
https://raw.githubusercontent.com/richardwilly98/elasticsearch-opennlp-auto-tagging/master/src/main/resources/models/en-lemmatizer.dict
  */
-public class NCOpenNLPTokenParser implements NCTokenParser {
-    private final NCOpenNLPTokenParserImpl impl;
+public class NCLemmaPosTokenEnricher implements NCTokenEnricher {
+    private final NCLemmaPosTokenEnricherImpl impl;
 
     /**
      *
-     *
-     * @param tokMdlSrc Local filesystem path, resources file path or URL for 
OpenNLP tokenizer model.
-     * @param posMdlSrc Local filesystem path, resources file path or URL for 
OpenNLP tagger model.
-     * @param lemmaDicSrc Local filesystem path, resources file path or URL 
for OpenNLP lemmatizer dictionary.
-     * @throws NCException
      */
-    public NCOpenNLPTokenParser(String tokMdlSrc, String posMdlSrc, String 
lemmaDicSrc) {
-        Objects.requireNonNull(tokMdlSrc, "Tokenizer model path cannot be 
null.");
+    public NCLemmaPosTokenEnricher(String posMdlSrc, String lemmaDicSrc) {
+        impl = new NCLemmaPosTokenEnricherImpl(posMdlSrc, lemmaDicSrc);
+    }
 
-        impl = new NCOpenNLPTokenParserImpl(tokMdlSrc, posMdlSrc, lemmaDicSrc);
+    @Override
+    public void enrich(NCRequest req, NCModelConfig cfg, List<NCToken> toks) {
+        assert impl != null;
+        impl.enrich(req, cfg, toks);
+    }
+
+    @Override
+    public void onStart(NCModelConfig cfg) {
+        impl.onStart(cfg);
     }
 
     @Override
-    public List<NCToken> tokenize(String text) {
-        return impl.tokenize(text);
+    public void onStop(NCModelConfig cfg) {
+        impl.onStop(cfg);
     }
 }
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCDictionaryTokenEnricherImpl.scala
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCDictionaryTokenEnricherImpl.scala
index 59c1847..41feb75 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCDictionaryTokenEnricherImpl.scala
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCDictionaryTokenEnricherImpl.scala
@@ -31,5 +31,7 @@ class NCDictionaryTokenEnricherImpl extends NCTokenEnricher:
     init()
 
     private def init(): Unit = dict = 
NCUtils.readResource("moby/354984si.ngl", "iso-8859-1").toSet
+    private def getLemma(t: NCToken): String = 
t.getOpt("lemma").orElseThrow(() => throw new NCException("Lemma not found in 
token."))
+
     override def enrich(req: NCRequest, cfg: NCModelConfig, toks: 
JList[NCToken]): Unit =
-        toks.forEach(t => t.put("dict", dict.contains(t.getLemma)))
+        toks.forEach(t => t.put("dict", dict.contains(getLemma(t))))
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/impl/NCOpenNLPTokenParserImpl.scala
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCLemmaPosTokenEnricherImpl.scala
similarity index 62%
copy from 
nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/impl/NCOpenNLPTokenParserImpl.scala
copy to 
nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCLemmaPosTokenEnricherImpl.scala
index b52d32e..16a1d64 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/impl/NCOpenNLPTokenParserImpl.scala
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCLemmaPosTokenEnricherImpl.scala
@@ -15,35 +15,28 @@
  * limitations under the License.
  */
 
-package org.apache.nlpcraft.nlp.token.parser.opennlp.impl
+package org.apache.nlpcraft.nlp.token.enricher.en.impl
 
 import com.typesafe.scalalogging.LazyLogging
-import opennlp.tools.lemmatizer.*
+import opennlp.tools.lemmatizer.DictionaryLemmatizer
 import opennlp.tools.postag.*
-import opennlp.tools.stemmer.*
-import opennlp.tools.tokenize.*
+import opennlp.tools.stemmer.PorterStemmer
 import org.apache.nlpcraft.*
-import org.apache.nlpcraft.internal.util.NCUtils
-
+import org.apache.nlpcraft.internal.util.*
 import java.io.*
 import java.util
-import java.util.stream.Collectors
-import java.util.{Collections, List as JList, Set as JSet}
 import scala.concurrent.ExecutionContext
 import scala.jdk.CollectionConverters.*
+import java.util.List as JList
 
 /**
-  *
-  * @param tokMdl
+  * 
   * @param posMdlSrc
   * @param lemmaDicSrc
   */
-class NCOpenNLPTokenParserImpl(tokMdl: String,  posMdlSrc: String, 
lemmaDicSrc: String) extends NCTokenParser with LazyLogging:
-    require(tokMdl != null)
-
+class NCLemmaPosTokenEnricherImpl(posMdlSrc: String, lemmaDicSrc: String) 
extends NCTokenEnricher with LazyLogging:
     private var tagger: POSTaggerME = _
     private var lemmatizer: DictionaryLemmatizer = _
-    private var tokenizer: TokenizerME = _
 
     init()
 
@@ -58,21 +51,16 @@ class NCOpenNLPTokenParserImpl(tokMdl: String,  posMdlSrc: 
String, lemmaDicSrc:
                 if lemmaDicSrc != null then
                     lemmatizer = new 
DictionaryLemmatizer(NCUtils.getStream(lemmaDicSrc))
                     logger.trace(s"Loaded resource: $lemmaDicSrc")
-            },
-            () => {
-                tokenizer = new TokenizerME(new 
TokenizerModel(NCUtils.getStream(tokMdl)))
-                logger.trace(s"Loaded resource: $tokMdl")
             }
         )(ExecutionContext.Implicits.global)
 
-    override def tokenize(text: String): JList[NCToken] =
-        case class Holder(text: String, start: Int, end: Int)
+    override def enrich(req: NCRequest, cfg: NCModelConfig, toksList: 
JList[NCToken]): Unit =
+        val toks = toksList.asScala
+        val txts = toks.map(_.getText).toArray
 
         this.synchronized {
-            val hs = tokenizer.tokenizePos(text).map(p => 
Holder(p.getCoveredText(text).toString, p.getStart, p.getEnd))
-            val toks = hs.map(_.text)
-            val poses = if tagger != null then tagger.tag(toks) else 
toks.map(_ => "")
-            var lemmas = if lemmatizer != null then lemmatizer.lemmatize(toks, 
poses) else toks
+            val poses = if tagger != null then tagger.tag(txts) else 
txts.map(_ => "")
+            var lemmas = if lemmatizer != null then lemmatizer.lemmatize(txts, 
poses) else txts
 
             require(toks.length == poses.length && toks.length == 
lemmas.length)
 
@@ -85,20 +73,17 @@ class NCOpenNLPTokenParserImpl(tokMdl: String,  posMdlSrc: 
String, lemmaDicSrc:
 
             if suspIdxs.nonEmpty && lemmatizer != null then
                 val fixes: Map[Int, String] = lemmatizer.
-                    lemmatize(suspIdxs.map(i => toks(i)), suspIdxs.map(_ => 
"NNN")).
+                    lemmatize(suspIdxs.map(i => txts(i)), suspIdxs.map(_ => 
"NNN")).
                     zipWithIndex.
                     flatMap { (lemma, i) => Option.when(lemma != 
"0")(suspIdxs(i) -> lemma) }.toMap
                 lemmas = lemmas.zipWithIndex.map {
                     (lemma, idx) => fixes.getOrElse(idx, lemma)
                 }
 
-            hs.zip(poses).zip(lemmas).zipWithIndex.map { case (((h, pos), 
lemma), idx) =>
-                new NCPropertyMapAdapter with NCToken:
-                    override inline def getText: String = h.text
-                    override val getLemma: String = lemma
-                    override val getPos: String = pos
-                    override val getIndex: Int = idx
-                    override val getStartCharIndex: Int = h.start
-                    override val getEndCharIndex: Int = h.end
-            }.toSeq.asJava
+            toks.zip(poses).zip(lemmas).foreach { case ((t, pos), lemma) =>
+                t.put("pos", pos)
+                t.put("lemma", lemma)
+                () // Otherwise - NPE.
+            }
         }
+
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCQuotesTokenEnricherImpl.scala
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCQuotesTokenEnricherImpl.scala
index 567000c..d804a18 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCQuotesTokenEnricherImpl.scala
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCQuotesTokenEnricherImpl.scala
@@ -28,7 +28,8 @@ import scala.jdk.CollectionConverters.*
   */
 class NCQuotesTokenEnricherImpl extends NCTokenEnricher with LazyLogging:
     private final val Q_POS: Set[String] = Set("``", "''")
-    private def isQuote(t: NCToken): Boolean = Q_POS.contains(t.getPos)
+    private def getPos(t: NCToken): String = t.getOpt("pos").orElseThrow(() => 
throw new NCException("POS not found in token."))
+    private def isQuote(t: NCToken): Boolean = Q_POS.contains(getPos(t))
 
     override def enrich(req: NCRequest, cfg: NCModelConfig, toksList: 
JList[NCToken]): Unit =
         val toks = toksList.asScala
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCStopWordsTokenEnricherImpl.scala
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCStopWordsTokenEnricherImpl.scala
index e2a3dda..6460290 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCStopWordsTokenEnricherImpl.scala
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/enricher/en/impl/NCStopWordsTokenEnricherImpl.scala
@@ -98,8 +98,10 @@ object NCStopWordsTokenEnricherImpl:
         "percent"
     )
 
-    private def isQuote(t: NCToken): Boolean = Q_POS.contains(t.getPos)
-    private def toLemmaKey(toks: Seq[NCToken]): String = 
toks.map(_.getLemma).mkString(" ")
+    private def getPos(t: NCToken): String = t.getOpt("pos").orElseThrow(() => 
throw new NCException(s"POS not found in token: ${t.keysSet()}"))
+    private def getLemma(t: NCToken): String = 
t.getOpt("lemma").orElseThrow(() => throw new NCException(s"Lemma not found in 
token: ${t.keysSet()}"))
+    private def isQuote(t: NCToken): Boolean = Q_POS.contains(getPos(t))
+    private def toLemmaKey(toks: Seq[NCToken]): String = 
toks.map(getLemma).mkString(" ")
     private def toValueKey(toks: Seq[NCToken]): String = 
toks.map(_.getText.toLowerCase).mkString(" ")
     private def toOriginalKey(toks: Seq[NCToken]): String = 
toks.map(_.getText).mkString(" ")
     private def isStopWord(t: NCToken): Boolean = 
t.getOpt[Boolean]("stopword").orElse(false)
@@ -263,7 +265,7 @@ class NCStopWordsTokenEnricherImpl(addStopsSet: 
JSet[String], exclStopsSet: JSet
         def matches(toks: Seq[NCToken]): Boolean =
             val posOpt = toks.size match
                 case 0 => throw new AssertionError(s"Unexpected empty tokens.")
-                case 1 => Option(toks.head.getPos)
+                case 1 => Option(getPos(toks.head))
                 case _ => None
 
             // Hash access.
@@ -462,7 +464,7 @@ class NCStopWordsTokenEnricherImpl(addStopsSet: 
JSet[String], exclStopsSet: JSet
         var stop = true
 
         for ((tok, idx) <- ns.zipWithIndex if idx != lastIdx && 
!isStopWord(tok) && !isException(Seq(tok)) &&
-            stopPoses.contains(tok.getPos) && isStopWord(ns(idx + 1)))
+            stopPoses.contains(getPos(tok)) && isStopWord(ns(idx + 1)))
             stops += tok
             stop = false
 
@@ -496,7 +498,7 @@ class NCStopWordsTokenEnricherImpl(addStopsSet: 
JSet[String], exclStopsSet: JSet
             var stop = true
 
             for ((tok, idx) <- ns.zipWithIndex if idx != max && 
!isStopWord(tok) && !exclStems.contains(stem(tok.getText)) &&
-                POSES.contains(tok.getPos) && isStopWord(ns(idx + 1)))
+                POSES.contains(getPos(tok)) && isStopWord(ns(idx + 1)))
                 stops += tok
                 stop = false
 
@@ -518,8 +520,8 @@ class NCStopWordsTokenEnricherImpl(addStopsSet: 
JSet[String], exclStopsSet: JSet
 
         for (tok <- toks)
             val idx = tok.getIndex
-            val pos = tok.getPos
-            val lemma = tok.getLemma
+            val pos = getPos(tok)
+            val lemma = getLemma(tok)
             val st = stem(tok.getText)
 
             def isFirst: Boolean = idx == 0
@@ -528,7 +530,7 @@ class NCStopWordsTokenEnricherImpl(addStopsSet: 
JSet[String], exclStopsSet: JSet
             def prev(): NCToken = toks(idx - 1)
             def isCommonVerbs(firstVerb: String, secondVerb: String): Boolean =
                 isVerb(pos) && lemma == secondVerb ||
-                    (isVerb(pos) && lemma == firstVerb && !isLast && 
isVerb(next().getPos) && next().getLemma == secondVerb)
+                    (isVerb(pos) && lemma == firstVerb && !isLast && 
isVerb(getPos(next())) && getLemma(next()) == secondVerb)
 
             // +---------------------------------+
             // | Pass #1.                        |
@@ -539,9 +541,9 @@ class NCStopWordsTokenEnricherImpl(addStopsSet: 
JSet[String], exclStopsSet: JSet
                     // 1. Word from 'percentage' list.
                     percents.contains(st) &&
                         // 2. Number before.
-                        !isFirst && prev().getPos == "CD" &&
+                        !isFirst && getPos(prev()) == "CD" &&
                         // 3. It's last word or any words after except numbers.
-                        (isLast || next().getPos != "CD")
+                        (isLast || getPos(next()) != "CD")
                     ) ||
                 // be, was, is etc. or has been etc.
                 isCommonVerbs("have", "be") ||
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParser.java
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParser.java
index 629c8aa..54bee08 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParser.java
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParser.java
@@ -29,8 +29,6 @@ import java.util.Objects;
  *
  * Models can be downloaded from the following resources:
  *  - tokenizer: http://opennlp.sourceforge.net/models-1.5/en-token.bin
- *  - tagger: http://opennlp.sourceforge.net/models-1.5/en-pos-maxent.bin
- *  - lemmatizer: 
https://raw.githubusercontent.com/richardwilly98/elasticsearch-opennlp-auto-tagging/master/src/main/resources/models/en-lemmatizer.dict
  */
 public class NCOpenNLPTokenParser implements NCTokenParser {
     private final NCOpenNLPTokenParserImpl impl;
@@ -43,10 +41,10 @@ public class NCOpenNLPTokenParser implements NCTokenParser {
      * @param lemmaDicSrc Local filesystem path, resources file path or URL 
for OpenNLP lemmatizer dictionary.
      * @throws NCException
      */
-    public NCOpenNLPTokenParser(String tokMdlSrc, String posMdlSrc, String 
lemmaDicSrc) {
+    public NCOpenNLPTokenParser(String tokMdlSrc) {
         Objects.requireNonNull(tokMdlSrc, "Tokenizer model path cannot be 
null.");
 
-        impl = new NCOpenNLPTokenParserImpl(tokMdlSrc, posMdlSrc, lemmaDicSrc);
+        impl = new NCOpenNLPTokenParserImpl(tokMdlSrc);
     }
 
     @Override
diff --git 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/impl/NCOpenNLPTokenParserImpl.scala
 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/impl/NCOpenNLPTokenParserImpl.scala
index b52d32e..51755ce 100644
--- 
a/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/impl/NCOpenNLPTokenParserImpl.scala
+++ 
b/nlpcraft/src/main/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/impl/NCOpenNLPTokenParserImpl.scala
@@ -18,87 +18,38 @@
 package org.apache.nlpcraft.nlp.token.parser.opennlp.impl
 
 import com.typesafe.scalalogging.LazyLogging
-import opennlp.tools.lemmatizer.*
-import opennlp.tools.postag.*
-import opennlp.tools.stemmer.*
 import opennlp.tools.tokenize.*
 import org.apache.nlpcraft.*
 import org.apache.nlpcraft.internal.util.NCUtils
 
 import java.io.*
 import java.util
-import java.util.stream.Collectors
-import java.util.{Collections, List as JList, Set as JSet}
-import scala.concurrent.ExecutionContext
+import java.util.List as JList
 import scala.jdk.CollectionConverters.*
 
 /**
   *
   * @param tokMdl
-  * @param posMdlSrc
-  * @param lemmaDicSrc
   */
-class NCOpenNLPTokenParserImpl(tokMdl: String,  posMdlSrc: String, 
lemmaDicSrc: String) extends NCTokenParser with LazyLogging:
+class NCOpenNLPTokenParserImpl(tokMdl: String) extends NCTokenParser with 
LazyLogging:
     require(tokMdl != null)
 
-    private var tagger: POSTaggerME = _
-    private var lemmatizer: DictionaryLemmatizer = _
-    private var tokenizer: TokenizerME = _
+    @volatile private var tokenizer: TokenizerME = _
 
     init()
 
     private def init(): Unit =
-        NCUtils.execPar(
-            () => {
-                if posMdlSrc != null then
-                    tagger = new POSTaggerME(new 
POSModel(NCUtils.getStream(posMdlSrc)))
-                    logger.trace(s"Loaded resource: $posMdlSrc")
-            },
-            () => {
-                if lemmaDicSrc != null then
-                    lemmatizer = new 
DictionaryLemmatizer(NCUtils.getStream(lemmaDicSrc))
-                    logger.trace(s"Loaded resource: $lemmaDicSrc")
-            },
-            () => {
-                tokenizer = new TokenizerME(new 
TokenizerModel(NCUtils.getStream(tokMdl)))
-                logger.trace(s"Loaded resource: $tokMdl")
-            }
-        )(ExecutionContext.Implicits.global)
+        tokenizer = new TokenizerME(new 
TokenizerModel(NCUtils.getStream(tokMdl)))
 
-    override def tokenize(text: String): JList[NCToken] =
-        case class Holder(text: String, start: Int, end: Int)
+        logger.trace(s"Loaded resource: $tokMdl")
 
+    override def tokenize(text: String): JList[NCToken] =
         this.synchronized {
-            val hs = tokenizer.tokenizePos(text).map(p => 
Holder(p.getCoveredText(text).toString, p.getStart, p.getEnd))
-            val toks = hs.map(_.text)
-            val poses = if tagger != null then tagger.tag(toks) else 
toks.map(_ => "")
-            var lemmas = if lemmatizer != null then lemmatizer.lemmatize(toks, 
poses) else toks
-
-            require(toks.length == poses.length && toks.length == 
lemmas.length)
-
-            // For some reasons lemmatizer (en-lemmatizer.dict) marks some 
words with non-existent POS 'NNN'
-            // Valid POS list: 
https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
-            val suspIdxs = lemmas.zip(poses).zipWithIndex.flatMap {
-                // "0" is flag that lemma cannot be obtained for some reasons.
-                case ((lemma, pos), i) => Option.when(lemma == "O" && pos == 
"NN")(i)
-            }
-
-            if suspIdxs.nonEmpty && lemmatizer != null then
-                val fixes: Map[Int, String] = lemmatizer.
-                    lemmatize(suspIdxs.map(i => toks(i)), suspIdxs.map(_ => 
"NNN")).
-                    zipWithIndex.
-                    flatMap { (lemma, i) => Option.when(lemma != 
"0")(suspIdxs(i) -> lemma) }.toMap
-                lemmas = lemmas.zipWithIndex.map {
-                    (lemma, idx) => fixes.getOrElse(idx, lemma)
-                }
-
-            hs.zip(poses).zip(lemmas).zipWithIndex.map { case (((h, pos), 
lemma), idx) =>
-                new NCPropertyMapAdapter with NCToken:
-                    override inline def getText: String = h.text
-                    override val getLemma: String = lemma
-                    override val getPos: String = pos
+            tokenizer.tokenizePos(text).zipWithIndex.map { (p, idx) =>
+                new NCPropertyMapAdapter with NCToken:
+                    override val getText: String = 
p.getCoveredText(text).toString
                     override val getIndex: Int = idx
-                    override val getStartCharIndex: Int = h.start
-                    override val getEndCharIndex: Int = h.end
+                    override val getStartCharIndex: Int = p.getStart
+                    override val getEndCharIndex: Int = p.getEnd
             }.toSeq.asJava
-        }
+        }
\ No newline at end of file
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/benchmark/token/parser/opennlp/NCEnOpenNlpTokenParserBenchmark.java
 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/benchmark/token/parser/opennlp/NCEnOpenNlpTokenParserBenchmark.java
index 94defba..69fe200 100644
--- 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/benchmark/token/parser/opennlp/NCEnOpenNlpTokenParserBenchmark.java
+++ 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/benchmark/token/parser/opennlp/NCEnOpenNlpTokenParserBenchmark.java
@@ -61,11 +61,7 @@ public class NCEnOpenNlpTokenParserBenchmark {
     public void setUp() {
         NCResourceReader reader = new NCResourceReader();
 
-        parser = new NCOpenNLPTokenParser(
-            reader.getPath("opennlp/en-token.bin"),
-            reader.getPath("opennlp/en-pos-maxent.bin"),
-            reader.getPath("opennlp/en-lemmatizer.dict")
-        );
+        parser = new 
NCOpenNLPTokenParser(reader.getPath("opennlp/en-token.bin"));
     }
 
     @Benchmark
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/entity/parser/semantic/NCSemanticEntityParserSpec.scala
 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/entity/parser/semantic/NCSemanticEntityParserSpec.scala
index aced085..b403cc3 100644
--- 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/entity/parser/semantic/NCSemanticEntityParserSpec.scala
+++ 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/entity/parser/semantic/NCSemanticEntityParserSpec.scala
@@ -18,10 +18,10 @@
 package org.apache.nlpcraft.nlp.entity.parser.semantic
 
 import org.apache.nlpcraft.*
-import org.apache.nlpcraft.internal.util.NCUtils
+import org.apache.nlpcraft.internal.util.{NCResourceReader, NCUtils}
 import org.apache.nlpcraft.nlp.entity.parser.opennlp.NCOpenNLPEntityParser
 import 
org.apache.nlpcraft.nlp.entity.parser.semantic.impl.en.NCEnSemanticPorterStemmer
-import org.apache.nlpcraft.nlp.token.enricher.en.NCStopWordsTokenEnricher
+import org.apache.nlpcraft.nlp.token.enricher.en.*
 import org.apache.nlpcraft.nlp.util.*
 import org.apache.nlpcraft.nlp.util.opennlp.*
 import org.junit.jupiter.api.*
@@ -88,6 +88,13 @@ class NCSemanticEntityParserSpec:
 
     private val stopWordsEnricher = new NCStopWordsTokenEnricher()
 
+    private val reader = new NCResourceReader()
+
+    private val lemmaPosEnricher = new NCLemmaPosTokenEnricher(
+        reader.getPath("opennlp/en-pos-maxent.bin"),
+        reader.getPath("opennlp/en-lemmatizer.dict")
+    )
+
     /**
       *
       * @param txt
@@ -99,6 +106,7 @@ class NCSemanticEntityParserSpec:
         val req = NCTestRequest(txt)
         val toks = EN_PIPELINE.getTokenParser.tokenize(txt)
 
+        lemmaPosEnricher.enrich(req, CFG, toks)
         stopWordsEnricher.enrich(req, CFG, toks)
 
         NCTestUtils.printTokens(toks.asScala.toSeq)
@@ -127,6 +135,7 @@ class NCSemanticEntityParserSpec:
         val req = NCTestRequest(txt)
         val toks = EN_PIPELINE.getTokenParser.tokenize(txt)
 
+        lemmaPosEnricher.enrich(req, CFG, toks)
         stopWordsEnricher.enrich(req, CFG, toks)
 
         NCTestUtils.printTokens(toks.asScala.toSeq)
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCDictionaryTokenEnricherSpec.scala
 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCDictionaryTokenEnricherSpec.scala
index dce9e9e..0bf56b6 100644
--- 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCDictionaryTokenEnricherSpec.scala
+++ 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCDictionaryTokenEnricherSpec.scala
@@ -17,6 +17,7 @@
 
 package org.apache.nlpcraft.nlp.token.enricher.en
 
+import org.apache.nlpcraft.internal.util.NCResourceReader
 import org.apache.nlpcraft.nlp.token.enricher.en.*
 import org.apache.nlpcraft.nlp.util.*
 import org.apache.nlpcraft.nlp.util.opennlp.*
@@ -28,16 +29,27 @@ import scala.jdk.CollectionConverters.*
   *
   */
 class NCDictionaryTokenEnricherSpec:
-    private val enricher = new NCDictionaryTokenEnricher()
+    private val dictEnricher = new NCDictionaryTokenEnricher()
+
+    private val reader = new NCResourceReader()
+
+    private val lemmaPosEnricher = new NCLemmaPosTokenEnricher(
+        reader.getPath("opennlp/en-pos-maxent.bin"),
+        reader.getPath("opennlp/en-lemmatizer.dict")
+    )    
 
     @Test
     def test(): Unit =
-        val toks = EN_PIPELINE.getTokenParser.tokenize("milk 
XYZ").asScala.toSeq
+        val txt = "milk XYZ"
+        val toks = EN_PIPELINE.getTokenParser.tokenize(txt).asScala.toSeq
 
         require(toks.head.getOpt[Boolean]("dict:en").isEmpty)
         require(toks.last.getOpt[Boolean]("dict:en").isEmpty)
 
-        enricher.enrich(null, CFG, toks.asJava)
+        val req = NCTestRequest(txt)
+
+        lemmaPosEnricher.enrich(req, CFG, toks.asJava)
+        dictEnricher.enrich(req, CFG, toks.asJava)
         NCTestUtils.printTokens(toks)
 
         require(toks.head.get[Boolean]("dict"))
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCQuotesTokenEnricherSpec.scala
 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCQuotesTokenEnricherSpec.scala
index d236b95..2071b06 100644
--- 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCQuotesTokenEnricherSpec.scala
+++ 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCQuotesTokenEnricherSpec.scala
@@ -18,6 +18,7 @@
 package org.apache.nlpcraft.nlp.token.enricher.en
 
 import org.apache.nlpcraft.NCToken
+import org.apache.nlpcraft.internal.util.NCResourceReader
 import org.apache.nlpcraft.nlp.token.enricher.en.*
 import org.apache.nlpcraft.nlp.util.*
 import org.apache.nlpcraft.nlp.util.opennlp.*
@@ -29,7 +30,14 @@ import scala.jdk.CollectionConverters.*
   *
   */
 class NCQuotesTokenEnricherSpec:
-    private val enricher = new NCQuotesTokenEnricher
+    private val reader = new NCResourceReader()
+
+    private val lemmaPosEnricher = new NCLemmaPosTokenEnricher(
+        reader.getPath("opennlp/en-pos-maxent.bin"),
+        reader.getPath("opennlp/en-lemmatizer.dict")
+    )
+
+    private val quoteEnricher = new NCQuotesTokenEnricher
 
     /**
       *
@@ -39,8 +47,10 @@ class NCQuotesTokenEnricherSpec:
     private def check(txt: String, quotes: Set[Integer]): Unit =
         val toks = EN_PIPELINE.getTokenParser.tokenize(txt)
         val toksSeq = toks.asScala.toSeq
-        
-        enricher.enrich(NCTestRequest(txt), CFG, toks)
+
+        val req = NCTestRequest(txt)
+        lemmaPosEnricher.enrich(req, CFG, toks)
+        quoteEnricher.enrich(req, CFG, toks)
         
         NCTestUtils.printTokens(toksSeq)
         toksSeq.foreach (tok => require(!(tok.get[Boolean]("quoted") ^ 
quotes.contains(tok.getIndex))))
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCStopWordsEnricherSpec.scala
 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCStopWordsEnricherSpec.scala
index 8136332..e8cd1c9 100644
--- 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCStopWordsEnricherSpec.scala
+++ 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/enricher/en/NCStopWordsEnricherSpec.scala
@@ -18,6 +18,7 @@
 package org.apache.nlpcraft.nlp.token.enricher.en
 
 import org.apache.nlpcraft.*
+import org.apache.nlpcraft.internal.util.NCResourceReader
 import org.apache.nlpcraft.nlp.token.enricher.en.*
 import org.apache.nlpcraft.nlp.util.*
 import org.apache.nlpcraft.nlp.util.opennlp.*
@@ -30,20 +31,30 @@ import scala.jdk.CollectionConverters.*
   *
   */
 class NCStopWordsEnricherSpec:
+    private val reader = new NCResourceReader()
+
+    private val lemmaPosEnricher = new NCLemmaPosTokenEnricher(
+        reader.getPath("opennlp/en-pos-maxent.bin"),
+        reader.getPath("opennlp/en-lemmatizer.dict")
+    )
+
     /**
       *
-      * @param enricher
+      * @param stopEnricher
       * @param txt
       * @param boolVals
       */
-    private def test(enricher: NCStopWordsTokenEnricher, txt: String, 
boolVals: Boolean*): Unit =
+    private def test(stopEnricher: NCStopWordsTokenEnricher, txt: String, 
boolVals: Boolean*): Unit =
         val toksList = EN_PIPELINE.getTokenParser.tokenize(txt)
         require(toksList.size == boolVals.size)
         val toks = toksList.asScala.toSeq
 
         toks.foreach(tok => require(tok.getOpt[Boolean]("stopword").isEmpty))
 
-        enricher.enrich(NCTestRequest(txt), CFG, toksList)
+        val req = NCTestRequest(txt)
+
+        lemmaPosEnricher.enrich(req, CFG, toksList)
+        stopEnricher.enrich(req, CFG, toksList)
 
         NCTestUtils.printTokens(toks)
         toks.zip(boolVals).foreach { (tok, boolVal) => 
require(tok.get[Boolean]("stopword") == boolVal) }
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParserSpec.scala
 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParserSpec.scala
index 9379626..dc56f8e 100644
--- 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParserSpec.scala
+++ 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/token/parser/opennlp/NCOpenNLPTokenParserSpec.scala
@@ -32,14 +32,24 @@ import scala.jdk.CollectionConverters.*
   *
   */
 class NCOpenNLPTokenParserSpec:
-    private val enricher = new NCStopWordsTokenEnricher(null, null)
+    private val reader = new NCResourceReader()
+
+    private val lemmaPosEnricher = new NCLemmaPosTokenEnricher(
+        reader.getPath("opennlp/en-pos-maxent.bin"),
+        reader.getPath("opennlp/en-lemmatizer.dict")
+    )
+
+    private val stopEnricher = new NCStopWordsTokenEnricher(null, null)
 
     private def isStopWord(t: NCToken): Boolean = t.get[Boolean]("stopword")
 
     private def test(txt: String, validate: Seq[NCToken] => _): Unit =
         val toksList = EN_PIPELINE.getTokenParser.tokenize(txt)
 
-        enricher.enrich(NCTestRequest(txt), CFG, toksList)
+        val req = NCTestRequest(txt)
+
+        lemmaPosEnricher.enrich(req, CFG, toksList)
+        stopEnricher.enrich(req, CFG, toksList)
 
         val toks = toksList.asScala.toSeq
         assert(toks.nonEmpty)
@@ -96,45 +106,4 @@ class NCOpenNLPTokenParserSpec:
             // Nested brackets.
             "< < [ a ] > >",
             toks => require(!isStopWord(toks.find(_.getText == "a").get))
-        )
-
-    @Test
-    def testNullable(): Unit =
-        val reader = new NCResourceReader
-        val txt = "parents had files"
-
-        // 1. Nullable.
-        var parser = new NCOpenNLPTokenParser(
-            reader.getPath("opennlp/en-token.bin"),
-            null,
-            null
-        )
-
-        var tbl = NCAsciiTable("Text", "Lemma", "POS")
-
-        for (t <- parser.tokenize(txt).asScala)
-            tbl += (t.getText, t.getLemma, t.getPos)
-
-            require(t.getPos.isEmpty)
-            require(t.getText == t.getLemma)
-
-        println(tbl.toString)
-
-        // 2. Not nullable.
-        parser = new NCOpenNLPTokenParser(
-            reader.getPath("opennlp/en-token.bin"),
-            reader.getPath("opennlp/en-pos-maxent.bin"),
-            reader.getPath("opennlp/en-lemmatizer.dict")
-        )
-
-        tbl = NCAsciiTable("Text", "Lemma", "POS")
-
-        for (t <- parser.tokenize(txt).asScala)
-            tbl += (t.getText, t.getLemma, t.getPos)
-
-            require(t.getPos.nonEmpty)
-            require(t.getText != t.getLemma)
-
-        println(tbl.toString)
-
-
+        )
\ No newline at end of file
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/NCTestToken.scala 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/NCTestToken.scala
index bf63870..9344898 100644
--- a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/NCTestToken.scala
+++ b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/NCTestToken.scala
@@ -47,7 +47,4 @@ case class NCTestToken(
     override def getText: String = txt
     override def getIndex: Int = idx
     override def getStartCharIndex: Int = start
-    override def getEndCharIndex: Int = end
-    override def getLemma: String = if lemma  != null then lemma else txt
-    override def getPos: String = if pos  != null then pos else "undefined"
-
+    override def getEndCharIndex: Int = end
\ No newline at end of file
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/NCTestUtils.scala 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/NCTestUtils.scala
index f95e11a..0c83ab1 100644
--- a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/NCTestUtils.scala
+++ b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/NCTestUtils.scala
@@ -41,14 +41,12 @@ object NCTestUtils:
       * @param toks
       */
     def printTokens(toks: Seq[NCToken]): Unit =
-        val tbl = NCAsciiTable("Text", "Index", "POS", "Lemma", "Stopword", 
"Start", "End", "Properties")
+        val tbl = NCAsciiTable("Text", "Index", "Stopword", "Start", "End", 
"Properties")
 
         for (t <- toks)
             tbl += (
                 t.getText,
                 t.getIndex,
-                t.getPos,
-                t.getLemma,
                 t.getOpt[Boolean]("stopword").toScala match
                     case Some(b) => b.toString
                     case None => "undef."
diff --git 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/opennlp/NCTestConfigJava.java
 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/opennlp/NCTestConfigJava.java
index 6d1ec4d..25774b6 100644
--- 
a/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/opennlp/NCTestConfigJava.java
+++ 
b/nlpcraft/src/test/scala/org/apache/nlpcraft/nlp/util/opennlp/NCTestConfigJava.java
@@ -38,10 +38,6 @@ public class NCTestConfigJava {
      *
      */
     public static final NCTestPipeline EN_PIPELINE = new NCTestPipeline(
-        new NCOpenNLPTokenParser(
-            reader.getPath("opennlp/en-token.bin"),
-            reader.getPath("opennlp/en-pos-maxent.bin"),
-            reader.getPath("opennlp/en-lemmatizer.dict")
-        )
+        new NCOpenNLPTokenParser(reader.getPath("opennlp/en-token.bin"))
     );
 }

Reply via email to