This is an automated email from the ASF dual-hosted git repository.

sergeykamov pushed a commit to branch NLPCRAFT-70_NEW
in repository https://gitbox.apache.org/repos/asf/incubator-nlpcraft.git


The following commit(s) were added to refs/heads/NLPCRAFT-70_NEW by this push:
     new 389e733  WIP.
389e733 is described below

commit 389e73342c55d7ed45ee58037d8ec28c87589103
Author: Sergey Kamov <[email protected]>
AuthorDate: Tue Jul 6 17:20:20 2021 +0300

    WIP.
---
 .../nlpcraft/server/mdo/NCProbeModelMdo.scala      |  1 -
 .../ctxword/NCContextWordCategoriesEnricher.scala  | 38 +++++-----------------
 .../server/sugsyn/NCSuggestSynonymManager.scala    | 17 ++--------
 3 files changed, 11 insertions(+), 45 deletions(-)

diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/mdo/NCProbeModelMdo.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/mdo/NCProbeModelMdo.scala
index d330ec7..cb85a83 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/mdo/NCProbeModelMdo.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/mdo/NCProbeModelMdo.scala
@@ -19,7 +19,6 @@ package org.apache.nlpcraft.server.mdo
 
 import org.apache.nlpcraft.server.mdo.impl._
 
-
 @NCMdoEntity(sql = false)
 case class NCCtxWordCategoriesConfigMdo(
     @NCMdoField probeId: String,
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/enrichers/ctxword/NCContextWordCategoriesEnricher.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/enrichers/ctxword/NCContextWordCategoriesEnricher.scala
index 503e504..8307863 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/enrichers/ctxword/NCContextWordCategoriesEnricher.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/enrichers/ctxword/NCContextWordCategoriesEnricher.scala
@@ -259,20 +259,13 @@ object NCContextWordCategoriesEnricher extends NCServerEnricher {
       * @return
       */
     private def getCorpusData(cfg: NCCtxWordCategoriesConfigMdo, key: ModelProbeKey, parent: Span = null):
-    Map[
-
-        /** Element ID */
-        String, ElementData] =
-        elemsCorpuses.synchronized {
-            elemsCorpuses.get(key)
-        } match {
+        Map[/** Element ID */String, ElementData] =
+        elemsCorpuses.synchronized { elemsCorpuses.get(key) } match {
             case Some(cache) => cache
             case None =>
                 val res = askSamples(cfg, parent)
 
-                elemsCorpuses.synchronized {
-                    elemsCorpuses += key -> res
-                }
+                elemsCorpuses.synchronized { elemsCorpuses += key -> res }
 
                 res
         }
@@ -284,9 +277,7 @@ object NCContextWordCategoriesEnricher extends NCServerEnricher {
       * @return
       */
     private def getValuesData(cfg: NCCtxWordCategoriesConfigMdo, key: ModelProbeKey): ValuesHolder =
-        valuesStems.synchronized {
-            valuesStems.get(key)
-        } match {
+        valuesStems.synchronized { valuesStems.get(key) } match {
             case Some(cache) => cache
             case None =>
                 def mkMap(convert: String => String): Map[String, Set[String]] =
@@ -301,9 +292,7 @@ object NCContextWordCategoriesEnricher extends NCServerEnricher {
 
                 val h = ValuesHolder(normal = normsMap, stems = stemsMap.filter(p => !normsMap.keySet.contains(p._1)))
 
-                valuesStems.synchronized {
-                    valuesStems += key -> h
-                }
+                valuesStems.synchronized { valuesStems += key -> h }
 
                 h
         }
@@ -338,10 +327,8 @@ object NCContextWordCategoriesEnricher extends NCServerEnricher {
       * @return
       */
     @throws[NCE]
-    private def askSamples(cfg: NCCtxWordCategoriesConfigMdo, parent: Span = null): Map[
-
-        /** Element ID */
-        String, ElementData] = {
+    private def askSamples(cfg: NCCtxWordCategoriesConfigMdo, parent: Span = null):
+    Map[/** Element ID */String, ElementData] = {
         val corpusSeq = cfg.corpus.toSeq
         val corpusWords = corpusSeq.map(parser.parse(_).map(_.word))
         val nlpWords = corpusSeq.map(s => parser.parse(s))
@@ -392,16 +379,7 @@ object NCContextWordCategoriesEnricher extends NCServerEnricher {
             val respsSeq: Seq[(NCSuggestionRequest, Seq[NCWordSuggestion])] = resps.toSeq
 
             def mkMap(convert: (NCSuggestionRequest, NCWordSuggestion) => String):
-            Map[
-
-                /** Element ID */
-                String,
-
-                /** Word key */
-                Map[String,
-
-                    /** Confidences */
-                    Seq[Double]]] = {
+                Map[/** Element ID */ String, /** Word key */ Map[String, /** Confidences */ Seq[Double]]] = {
                 val seq: Seq[(String, Map[String, Double])] =
                     respsSeq.
                         map { case (req, suggs) =>
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/sugsyn/NCSuggestSynonymManager.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/sugsyn/NCSuggestSynonymManager.scala
index c2c8d23..3d83030 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/sugsyn/NCSuggestSynonymManager.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/sugsyn/NCSuggestSynonymManager.scala
@@ -502,23 +502,12 @@ object NCSuggestSynonymManager extends NCService {
                         if (cnt.incrementAndGet() == batches.size) {
                             val min = minScoreOpt.getOrElse(DFLT_MIN_SCORE)
 
-                            val map: Map[NCSuggestionRequest, Seq[NCWordSuggestion]] =
+                            promise.success(
                                 data.asScala.groupBy(_.request).map {
                                     case (req, ress) =>
                                         req -> ress.flatMap(_.suggestions.filter(_.score >= min).toSeq).sortBy(-_.score)
-                            }
-
-                            // TODO ? logic?
-//                            val map: Map[NCSuggestionRequest, Seq[NCWordSuggestion]] = data.asScala.groupBy(_.request).map(p =>
-//                                p._1 ->
-//                                p._2.
-//                                    map(_.suggestions.map(p => (toStem(p.word), p.score))).
-//                                    map(_.groupBy(_._1)).
-//                                    flatMap(p => p.map(p => p._1 -> p._1 -> p._2.map(_._2).sum / p._2.size).
-//                                    filter(_._2 >= min).
-//                                    map(p => NCWordSuggestion(p._1._2, p._2)).toSeq).toSeq)
-
-                            promise.success(map)
+                                }
+                            )
                         }
                         ()
                     },
