This is an automated email from the ASF dual-hosted git repository.
sergeykamov pushed a commit to branch NLPCRAFT-443
in repository https://gitbox.apache.org/repos/asf/incubator-nlpcraft.git
The following commit(s) were added to refs/heads/NLPCRAFT-443 by this push:
new 2ff184a WIP.
2ff184a is described below
commit 2ff184ae19ce3914b6205a3404f8ec1071021249
Author: Sergey Kamov <[email protected]>
AuthorDate: Sat Sep 25 20:56:33 2021 +0300
WIP.
---
.../nlpcraft/probe/mgrs/NCProbeIdlToken.scala | 96 +++++++++++++++++
.../nlpcraft/probe/mgrs/NCProbeSynonym.scala | 4 -
.../mgrs/nlp/enrichers/model/NCModelEnricher.scala | 117 +++++----------------
.../probe/mgrs/synonyms/NCSynonymsManager.scala | 90 ++++++++--------
4 files changed, 166 insertions(+), 141 deletions(-)
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeIdlToken.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeIdlToken.scala
new file mode 100644
index 0000000..69a2f2f
--- /dev/null
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeIdlToken.scala
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nlpcraft.probe.mgrs
+
+import org.apache.nlpcraft.common.nlp.NCNlpSentenceToken
+import org.apache.nlpcraft.model.{NCToken, _}
+
+/**
+ * A single unit of IDL synonym matching: either an already-detected model token or a plain sentence word.
+ *
+ * @param isToken Whether this unit wraps a detected token.
+ * @param isWord Whether this unit wraps a plain sentence word.
+ * @param token Wrapped token, or `null` if this unit is a word.
+ * @param word Wrapped sentence word, or `null` if this unit is a token.
+ * @param origText Original text of the wrapped unit.
+ * @param wordIndexes Indexes of the sentence words this unit covers.
+ * @param minIndex Minimum covered word index.
+ * @param maxIndex Maximum covered word index.
+ */
+case class NCProbeIdlToken(
+    isToken: Boolean,
+    isWord: Boolean,
+    token: NCToken,
+    word: NCNlpSentenceToken,
+    origText: String,
+    wordIndexes: Set[Int],
+    minIndex: Int,
+    maxIndex: Int
+) {
+    private lazy val hash = if (isToken) Seq(wordIndexes, token.getId).hashCode() else wordIndexes.hashCode()
+
+    override def hashCode(): Int = hash
+
+    def isSubsetOf(minIndex: Int, maxIndex: Int, indexes: Set[Int]): Boolean =
+        if (this.minIndex > maxIndex || this.maxIndex < minIndex)
+            false
+        else
+            wordIndexes.subsetOf(indexes)
+
+    override def equals(obj: Any): Boolean = obj match {
+        case x: NCProbeIdlToken =>
+            hash == x.hash && (isToken && x.isToken && token == x.token || isWord && x.isWord && word == x.word)
+        case _ => false
+    }
+
+    // Added for debugging.
+    override def toString: String = {
+        val idxs = wordIndexes.mkString(",")
+
+        if (isToken && token.getId != "nlpcraft:nlp") s"'$origText' (${token.getId}) [$idxs]"
+        else s"'$origText' [$idxs]"
+    }
+}
+
+/**
+ * Factory methods creating [[NCProbeIdlToken]] from either a token or a word.
+ */
+object NCProbeIdlToken {
+    def apply(t: NCToken): NCProbeIdlToken =
+        NCProbeIdlToken(
+            isToken = true,
+            isWord = false,
+            token = t,
+            word = null,
+            origText = t.origText,
+            wordIndexes = t.wordIndexes.toSet,
+            minIndex = t.wordIndexes.head,
+            maxIndex = t.wordIndexes.last
+        )
+
+    def apply(t: NCNlpSentenceToken): NCProbeIdlToken =
+        NCProbeIdlToken(
+            isToken = false,
+            isWord = true,
+            token = null,
+            word = t,
+            origText = t.origText,
+            wordIndexes = t.wordIndexes.toSet,
+            minIndex = t.wordIndexes.head,
+            maxIndex = t.wordIndexes.last
+        )
+}
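The new NCProbeIdlToken above collapses two earlier representations, the Either[NCToken, NCNlpSentenceToken] alias (NCIdlToken, removed from NCProbeSynonym below) and the enricher-local Complex class, into a single flag-based union. A minimal, self-contained sketch of that pattern, using hypothetical stand-in types (Tok, Word and IdlTok are illustrative only, not NLPCraft API):

object IdlTokenSketch extends App {
    // Stand-ins for NCToken and NCNlpSentenceToken (hypothetical, for illustration only).
    case class Tok(id: String, origText: String)
    case class Word(origText: String)

    // Same flag-based union as NCProbeIdlToken: exactly one of token/word is set,
    // and call sites branch on isToken instead of Either's isLeft/swap/toOption plumbing.
    case class IdlTok(isToken: Boolean, token: Tok, word: Word) {
        def isWord: Boolean = !isToken
        def origText: String = if (isToken) token.origText else word.origText
    }
    object IdlTok {
        def apply(t: Tok): IdlTok = IdlTok(isToken = true, token = t, word = null)
        def apply(w: Word): IdlTok = IdlTok(isToken = false, token = null, word = w)
    }

    val units = Seq(IdlTok(Tok("x:city", "New York")), IdlTok(Word("in")))

    // The old Either-based style would have read:
    // units.map(e => if (e.isLeft) e.swap.toOption.get.origText else e.toOption.get.origText)
    units.foreach(u => println(s"'${u.origText}' isToken=${u.isToken}"))
}

Compared to the Either encoding, the flags let call sites read fields directly (complex.token, complex.word), as the NCModelEnricher hunks below show.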
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeSynonym.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeSynonym.scala
index aa13574..2b533b3 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeSynonym.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeSynonym.scala
@@ -17,8 +17,6 @@
package org.apache.nlpcraft.probe.mgrs
-import org.apache.nlpcraft.common.nlp.NCNlpSentenceToken
-import org.apache.nlpcraft.model._
import org.apache.nlpcraft.probe.mgrs.NCProbeSynonymChunkKind._
import scala.collection.mutable
@@ -138,8 +136,6 @@ class NCProbeSynonym(
}
object NCProbeSynonym {
-    type NCIdlToken = Either[NCToken, NCNlpSentenceToken]
-
    /**
     *
     * @param isElementId
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
index ded7928..68cecc2 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
@@ -23,13 +23,12 @@ import org.apache.nlpcraft.common.nlp.NCNlpSentence.NoteLink
import org.apache.nlpcraft.common.nlp.{NCNlpSentence => Sentence, NCNlpSentenceNote => NlpNote, NCNlpSentenceToken => NlpToken}
import org.apache.nlpcraft.model._
import org.apache.nlpcraft.model.impl.NCTokenImpl
-import org.apache.nlpcraft.probe.mgrs.NCProbeSynonym.NCIdlToken
import org.apache.nlpcraft.probe.mgrs.NCProbeSynonymChunkKind.NCSynonymChunkKind
import org.apache.nlpcraft.probe.mgrs.nlp.NCProbeEnricher
import org.apache.nlpcraft.probe.mgrs.nlp.impl.NCRequestImpl
import org.apache.nlpcraft.probe.mgrs.sentence.NCSentenceManager
import org.apache.nlpcraft.probe.mgrs.synonyms.NCSynonymsManager
-import org.apache.nlpcraft.probe.mgrs.{NCProbeModel, NCProbeVariants, NCTokenPartKey, NCProbeSynonym => Synonym}
+import org.apache.nlpcraft.probe.mgrs.{NCProbeIdlToken => IdlToken, NCProbeModel, NCProbeVariants, NCTokenPartKey, NCProbeSynonym => Synonym}

import java.io.Serializable
import java.util.{List => JList}
@@ -43,74 +42,11 @@ import scala.jdk.CollectionConverters.{ListHasAsScala, MapHasAsJava, MapHasAsSca
object NCModelEnricher extends NCProbeEnricher {
    type TokType = (NCToken, NCSynonymChunkKind)
-    object Complex {
-        def apply(t: NCToken): Complex =
-            Complex(
-                data = Left(t),
-                isToken = true,
-                isWord = false,
-                token = t,
-                word = null,
-                origText = t.origText,
-                wordIndexes = t.wordIndexes.toSet,
-                minIndex = t.wordIndexes.head,
-                maxIndex = t.wordIndexes.last
-            )
-
-        def apply(t: NlpToken): Complex =
-            Complex(
-                data = Right(t),
-                isToken = false,
-                isWord = true,
-                token = null,
-                word = t,
-                origText = t.origText,
-                wordIndexes = t.wordIndexes.toSet,
-                minIndex = t.wordIndexes.head,
-                maxIndex = t.wordIndexes.last
-            )
-    }
-
-    case class Complex(
-        data: NCIdlToken,
-        isToken: Boolean,
-        isWord: Boolean,
-        token: NCToken,
-        word: NlpToken,
-        origText: String,
-        wordIndexes: Set[Int],
-        minIndex: Int,
-        maxIndex: Int
-    ) {
-        private final val hash = if (isToken) Seq(wordIndexes, token.getId).hashCode() else wordIndexes.hashCode()
-
-        override def hashCode(): Int = hash
-
-        def isSubsetOf(minIndex: Int, maxIndex: Int, indexes: Set[Int]): Boolean =
-            if (this.minIndex > maxIndex || this.maxIndex < minIndex)
-                false
-            else
-                wordIndexes.subsetOf(indexes)
-
-        override def equals(obj: Any): Boolean = obj match {
-            case x: Complex =>
-                hash == x.hash && (isToken && x.isToken && token == x.token || isWord && x.isWord && word == x.word)
-            case _ => false
-        }
-
-        // Added for debug reasons.
-        override def toString: String = {
-            val idxs = wordIndexes.mkString(",")
-
-            if (isToken && token.getId != "nlpcraft:nlp") s"'$origText' (${token.getId}) [$idxs]]" else s"'$origText' [$idxs]"
-        }
-    }
-
    object ComplexSeq {
-        def apply(all: Seq[Complex]): ComplexSeq = ComplexSeq(all.filter(_.isToken), all.flatMap(_.wordIndexes).toSet)
+        def apply(all: Seq[IdlToken]): ComplexSeq = ComplexSeq(all.filter(_.isToken), all.flatMap(_.wordIndexes).toSet)
    }
-    case class ComplexSeq(tokensComplexes: Seq[Complex], wordsIndexes: Set[Int]) {
+    case class ComplexSeq(tokensComplexes: Seq[IdlToken], wordsIndexes: Set[Int]) {
        private val (idxsSet: Set[Int], minIndex: Int, maxIndex: Int) = {
            val seq = tokensComplexes.flatMap(_.wordIndexes).distinct.sorted
@@ -126,8 +62,7 @@ object NCModelEnricher extends NCProbeEnricher {
        override def toString: String = tokensComplexes.mkString(" | ")
    }
-    case class ComplexHolder(complexesWords: Seq[Complex], complexes: Seq[ComplexSeq])
-
+    case class ComplexHolder(complexesWords: Seq[IdlToken], complexes: Seq[ComplexSeq])
    /**
     *
@@ -299,13 +234,6 @@ object NCModelEnricher extends NCProbeEnricher {
     */
    private def combosTokens(toks: Seq[NlpToken]): Seq[(Seq[NlpToken], Seq[NlpToken])] =
        combos(toks).flatMap(combo => {
-            // TODO: delete after finish task.
-//            val stops = combo.filter(_.isStopWord)
-//
-//            val stops4Delete = Range.inclusive(1, stops.size).flatMap(stops.combinations)
-//
-//            (Seq(combo) ++ stops4Delete.map(del => combo.filter(t => !del.contains(t)))).map(_ -> combo)
-
            val stops = combo.filter(s => s.isStopWord && s != combo.head && s != combo.last)
            val slides = mutable.ArrayBuffer.empty[mutable.ArrayBuffer[NlpToken]]
@@ -358,10 +286,10 @@ object NCModelEnricher extends NCProbeEnricher {
     * @param seq
     * @param s
     */
-    private def toParts(mdl: NCProbeModel, stvReqId: String, seq: Seq[NCIdlToken], s: Synonym): Seq[TokType] =
+    private def toParts(mdl: NCProbeModel, stvReqId: String, seq: Seq[IdlToken], s: Synonym): Seq[TokType] =
        seq.zip(s.map(_.kind)).flatMap {
            case (complex, kind) =>
-                val t = if (complex.isLeft) complex.swap.toOption.get else mkNlpToken(mdl, stvReqId, complex.toOption.get)
+                val t = if (complex.isToken) complex.token else mkNlpToken(mdl, stvReqId, complex.word)
                Some(t -> kind)
        }
@@ -371,10 +299,10 @@ object NCModelEnricher extends NCProbeEnricher {
     * @param tows
     * @param ns
     */
-    private def toTokens(tows: Seq[NCIdlToken], ns: Sentence): Seq[NlpToken] =
+    private def toTokens(tows: Seq[IdlToken], ns: Sentence): Seq[NlpToken] =
        (
-            tows.filter(_.isRight).map(_.toOption.get) ++
-                tows.filter(_.isLeft).map(_.swap.toOption.get).
+            tows.filter(_.isWord).map(_.word) ++
+                tows.filter(_.isToken).map(_.token).
                flatMap(w => ns.filter(t => t.wordIndexes.intersect(w.wordIndexes).nonEmpty))
        ).sortBy(_.startCharIndex)
@@ -397,7 +325,7 @@ object NCModelEnricher extends NCProbeEnricher {
     * @param ns
     */
    private def mkComplexes(mdl: NCProbeModel, ns: Sentence): ComplexHolder = {
-        val complexesWords = ns.map(Complex(_))
+        val complexesWords = ns.map(IdlToken(_))
        val complexes =
            NCProbeVariants.convert(ns.srvReqId, mdl, NCSentenceManager.collapse(mdl.model, ns.clone())).
@@ -418,7 +346,7 @@ object NCModelEnricher extends NCProbeEnricher {
                        // Single word token is not split as words - token.
                        // Partly (not strict in) token - word.
                        if (t.wordIndexes.length == 1 || senPartComb.contains(t))
-                            Seq(Complex(t))
+                            Seq(IdlToken(t))
                        else
                            t.wordIndexes.map(complexesWords)
                )
@@ -448,7 +376,7 @@ object NCModelEnricher extends NCProbeEnricher {
     * @param h
     * @param toks
     */
-    private def mkCombinations(h: ComplexHolder, toks: Seq[NlpToken]): Seq[Seq[Complex]] = {
+    private def mkCombinations(h: ComplexHolder, toks: Seq[NlpToken]): Seq[Seq[IdlToken]] = {
        val idxs = toks.flatMap(_.wordIndexes).toSet
        h.complexes.par.
@@ -525,7 +453,14 @@ object NCModelEnricher extends NCProbeEnricher {
                                ( parts.isEmpty || !parts.exists { case (t, _) => t.getId == elemId })
                        if (ok)
-                            mark(ns, elemId, elemToks, direct = syn.isDirect && U.isIncreased(resIdxs), syn = Some(syn), parts = parts)
+                            mark(
+                                ns,
+                                elemId,
+                                elemToks,
+                                direct = syn.isDirect && U.isIncreased(resIdxs),
+                                syn = Some(syn),
+                                parts = parts
+                            )
                        if (DEEP_DEBUG)
                            logger.trace(
@@ -586,7 +521,7 @@ object NCModelEnricher extends NCProbeEnricher {
                // 1.2 Sparse.
                if (!found && mdl.hasSparseSynonyms)
                    for (syn <- get(mdl.sparseSynonyms, elemId))
-                        NCSynonymsManager.onSparseMatchTokens(
+                        NCSynonymsManager.onSparseMatch(
                            ns.srvReqId,
                            elemId,
                            syn,
@@ -604,16 +539,16 @@ object NCModelEnricher extends NCProbeEnricher {
                    if (!mdl.hasSparseSynonyms) {
                        var found = false
-                        for (syn <- allSyns; comb <- allCombs; data = comb.map(_.data) if !found)
+                        for (syn <- allSyns; comb <- allCombs; if !found)
                            NCSynonymsManager.onMatch(
                                ns.srvReqId,
                                elemId,
                                syn,
-                                data,
+                                comb,
                                req,
                                variantsToks,
                                _ => {
-                                    val parts = toParts(mdl, ns.srvReqId, data, syn)
+                                    val parts = toParts(mdl, ns.srvReqId, comb, syn)
                                    add("IDL continuous", toksExt, syn, parts)
@@ -628,11 +563,11 @@ object NCModelEnricher extends NCProbeEnricher {
                                ns.srvReqId,
                                elemId,
                                syn,
-                                comb.map(_.data),
+                                comb,
                                req,
                                variantsToks,
                                res => {
-                                    val toks = getSparsedTokens(toTokens(res, ns), toTokens(comb.map(_.data), ns))
+                                    val toks = getSparsedTokens(toTokens(res, ns), toTokens(comb, ns))
                                    val parts = toParts(mdl, ns.srvReqId, res, syn)
                                    val typ = if (syn.sparse) "IDL sparse" else "IDL continuous"
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/synonyms/NCSynonymsManager.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/synonyms/NCSynonymsManager.scala
index 315f380..8de2fe7 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/synonyms/NCSynonymsManager.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/synonyms/NCSynonymsManager.scala
@@ -22,9 +22,8 @@ import org.apache.nlpcraft.common.nlp.{NCNlpSentenceNote => NlpNote, NCNlpSenten
import org.apache.nlpcraft.common.{NCService, U}
import org.apache.nlpcraft.model._
import org.apache.nlpcraft.model.intent.{NCIdlContext, NCIdlFunction}
-import org.apache.nlpcraft.probe.mgrs.NCProbeSynonym.NCIdlToken
import org.apache.nlpcraft.probe.mgrs.NCProbeSynonymChunkKind.{IDL, NCSynonymChunkKind, REGEX, TEXT}
-import org.apache.nlpcraft.probe.mgrs.{NCProbeSynonymChunk, NCProbeSynonym => Synonym}
+import org.apache.nlpcraft.probe.mgrs.{NCProbeIdlToken => IdlToken, NCProbeSynonymChunk, NCProbeSynonym => Synonym}

import scala.collection.mutable
import scala.collection.parallel.CollectionConverters.ImmutableIterableIsParallelizable
@@ -79,8 +78,8 @@ object NCSynonymsManager extends NCService {
    }

    private val savedIdl = mutable.HashMap.empty[String, mutable.HashMap[SavedIdlKey, mutable.ArrayBuffer[Value]]]
-    private val idlChunksCache = mutable.HashMap.empty[String, mutable.HashMap[(NCIdlToken, NCProbeSynonymChunk), Boolean]]
-    private val idlCaches = mutable.HashMap.empty[String, CacheHolder[NCIdlToken]]
+    private val idlChunksCache = mutable.HashMap.empty[String, mutable.HashMap[(IdlToken, NCProbeSynonymChunk), Boolean]]
+    private val idlCaches = mutable.HashMap.empty[String, CacheHolder[IdlToken]]
    private val tokCaches = mutable.HashMap.empty[String, CacheHolder[Int]]

    override def start(parent: Span): NCService = {
@@ -197,22 +196,41 @@ object NCSynonymsManager extends NCService {
    /**
     *
+     * @param srvReqId
+     * @param elemId
+     * @param s
+     * @param tokens
+     */
+    private def isUnprocessedTokens(srvReqId: String, elemId: String, s: Synonym, tokens: Seq[Int]): Boolean =
+        tokCaches.getOrElseUpdate(srvReqId, new CacheHolder[Int]).isUnprocessed(elemId, s, tokens)
+
+    /**
+     *
+     * @param srvReqId
+     * @param elemId
+     * @param s
+     * @param tokens
+     */
+    private def isUnprocessedIdl(srvReqId: String, elemId: String, s: Synonym, tokens: Seq[IdlToken]): Boolean =
+        idlCaches.getOrElseUpdate(srvReqId, new CacheHolder[IdlToken]).isUnprocessed(elemId, s, tokens)
+
+    /**
+     *
     * @param tow
     * @param chunk
     * @param req
     * @param variantsToks
     */
-    private def isMatch(tow: NCIdlToken, chunk: NCProbeSynonymChunk, req: NCRequest, variantsToks: Seq[Seq[NCToken]]): Boolean =
+    private def isMatch(tow: IdlToken, chunk: NCProbeSynonymChunk, req: NCRequest, variantsToks: Seq[Seq[NCToken]]): Boolean =
        idlChunksCache.
            getOrElseUpdate(req.getServerRequestId,
-                mutable.HashMap.empty[(NCIdlToken, NCProbeSynonymChunk), Boolean]
+                mutable.HashMap.empty[(IdlToken, NCProbeSynonymChunk), Boolean]
            ).
            getOrElseUpdate(
                (tow, chunk),
                {
                    def get0[T](fromToken: NCToken => T, fromWord: NlpToken => T): T =
-                        if (tow.isLeft) fromToken(tow.swap.toOption.get)
-                        else fromWord(tow.toOption.get)
+                        if (tow.isToken) fromToken(tow.token) else fromWord(tow.word)
                    chunk.kind match {
                        case TEXT => chunk.wordStem == get0(_.stem, _.stem)
@@ -231,7 +249,7 @@ object NCSynonymsManager extends NCService {
                            )
                            if (ok)
-                                save(req, tow.swap.toOption.get, chunk.idlPred, variantsToks)
+                                save(req, tow.token, chunk.idlPred, variantsToks)
                            ok
@@ -269,7 +287,7 @@ object NCSynonymsManager extends NCService {
     * @param srvReqId
     * @param elemId
     * @param s
-     * @param tows
+     * @param toks
     * @param req
     * @param variantsToks
     * @param callback
@@ -278,18 +296,18 @@ object NCSynonymsManager extends NCService {
        srvReqId: String,
        elemId: String,
        s: Synonym,
-        tows: Seq[NCIdlToken],
+        toks: Seq[IdlToken],
        req: NCRequest,
        variantsToks: Seq[Seq[NCToken]],
        callback: Unit => Unit
    ): Unit =
-        if (isUnprocessedIdl(srvReqId, elemId, s, tows)) {
-            require(tows != null)
+        if (isUnprocessedIdl(srvReqId, elemId, s, toks)) {
+            require(toks != null)
            if (
-                tows.length == s.length &&
-                tows.count(_.isLeft) >= s.idlChunks && {
-                    tows.zip(s).sortBy(p => getSort(p._2.kind)).forall {
+                toks.length == s.length &&
+                toks.count(_.isToken) >= s.idlChunks && {
+                    toks.zip(s).sortBy(p => getSort(p._2.kind)).forall {
                        case (tow, chunk) => isMatch(tow, chunk, req, variantsToks)
                    }
                }
@@ -305,7 +323,7 @@ object NCSynonymsManager extends NCService {
     * @param toks
     * @param callback
     */
-    def onSparseMatchTokens(
+    def onSparseMatch(
        srvReqId: String, elemId: String, syn: Synonym, toks: Seq[NlpToken], callback: Seq[NlpToken] => Unit
    ): Unit =
        if (isUnprocessedTokens(srvReqId, elemId, syn, toks.map(_.index))) {
@@ -323,7 +341,7 @@ object NCSynonymsManager extends NCService {
     * @param srvReqId
     * @param elemId
     * @param syn
-     * @param tows
+     * @param toks
     * @param req
     * @param variantsToks
     * @param callback
@@ -332,22 +350,22 @@ object NCSynonymsManager extends NCService {
        srvReqId: String,
        elemId: String,
        syn: Synonym,
-        tows: Seq[NCIdlToken],
+        toks: Seq[IdlToken],
        req: NCRequest,
        variantsToks: Seq[Seq[NCToken]],
-        callback: Seq[NCIdlToken] => Unit
+        callback: Seq[IdlToken] => Unit
    ): Unit =
-        if (isUnprocessedIdl(srvReqId, elemId, syn, tows)) {
-            require(tows != null)
+        if (isUnprocessedIdl(srvReqId, elemId, syn, toks)) {
+            require(toks != null)
            require(req != null)
            require(syn.hasIdl)
            sparseMatch0(
                syn,
-                tows,
-                (t: NCIdlToken, chunk: NCProbeSynonymChunk) => isMatch(t, chunk, req, variantsToks),
-                (t: NCIdlToken) => if (t.isLeft) t.swap.toOption.get.getStartCharIndex
-                else t.toOption.get.startCharIndex,
+                toks,
+                (t: IdlToken, chunk: NCProbeSynonymChunk) => isMatch(t, chunk, req, variantsToks),
+                (t: IdlToken) =>
+                    if (t.isToken) t.token.getStartCharIndex else t.word.startCharIndex,
                shouldBeNeighbors = !syn.sparse
            ) match {
                case Some(res) => callback(res)
@@ -401,26 +419,6 @@ object NCSynonymsManager extends NCService {
    /**
     *
     * @param srvReqId
-     * @param elemId
-     * @param s
-     * @param tokens
-     */
-    private def isUnprocessedTokens(srvReqId: String, elemId: String, s: Synonym, tokens: Seq[Int]): Boolean =
-        tokCaches.getOrElseUpdate(srvReqId, new CacheHolder[Int]).isUnprocessed(elemId, s, tokens)
-
-    /**
-     *
-     * @param srvReqId
-     * @param elemId
-     * @param s
-     * @param tokens
-     */
-    private def isUnprocessedIdl(srvReqId: String, elemId: String, s: Synonym, tokens: Seq[NCIdlToken]): Boolean =
-        idlCaches.getOrElseUpdate(srvReqId, new CacheHolder[NCIdlToken]).isUnprocessed(elemId, s, tokens)
-
-    /**
-     *
-     * @param srvReqId
     */
    def clearRequestData(srvReqId: String): Unit = {
        clearIteration(srvReqId)
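The relocated isUnprocessedTokens/isUnprocessedIdl helpers implement per-request de-duplication: a cache holder is created lazily per server request ID via getOrElseUpdate, so a given (element, synonym, tokens) combination is matched at most once per request. A simplified, runnable sketch of the pattern; CacheHolder here is a hypothetical stand-in that drops the Synonym key the real one uses:

import scala.collection.mutable

// Simplified stand-in for the manager's CacheHolder (the real one also keys on the synonym).
class CacheHolder[T] {
    private val seen = mutable.HashSet.empty[(String, Seq[T])]

    // True only the first time this (element, tokens) pair is offered.
    def isUnprocessed(elemId: String, tokens: Seq[T]): Boolean = seen.add(elemId -> tokens)
}

object PerRequestCacheSketch extends App {
    private val tokCaches = mutable.HashMap.empty[String, CacheHolder[Int]]

    def isUnprocessedTokens(srvReqId: String, elemId: String, tokens: Seq[Int]): Boolean =
        tokCaches.getOrElseUpdate(srvReqId, new CacheHolder[Int]).isUnprocessed(elemId, tokens)

    println(isUnprocessedTokens("req1", "x:elem", Seq(0, 1))) // true: first time seen
    println(isUnprocessedTokens("req1", "x:elem", Seq(0, 1))) // false: duplicate within request
    println(isUnprocessedTokens("req2", "x:elem", Seq(0, 1))) // true: different request
}

This is also why clearRequestData drops the per-request caches once a request finishes, keeping memory bounded.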