cpoerschke commented on a change in pull request #1571:
URL: https://github.com/apache/lucene-solr/pull/1571#discussion_r518364159



##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/search/LTRQParserPlugin.java
##########
@@ -146,93 +149,114 @@ public LTRQParser(String qstr, SolrParams localParams, SolrParams params,
     @Override
     public Query parse() throws SyntaxError {
       // ReRanking Model
-      final String modelName = localParams.get(LTRQParserPlugin.MODEL);
-      if ((modelName == null) || modelName.isEmpty()) {
+      final String[] modelNames = localParams.getParams(LTRQParserPlugin.MODEL);
+      if ((modelNames == null) || modelNames.length==0 || modelNames[0].isEmpty()) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
             "Must provide model in the request");
       }
-
-      final LTRScoringModel ltrScoringModel = mr.getModel(modelName);
-      if (ltrScoringModel == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "cannot find " + LTRQParserPlugin.MODEL + " " + modelName);
-      }
-
-      final String modelFeatureStoreName = ltrScoringModel.getFeatureStoreName();
-      final boolean extractFeatures = SolrQueryRequestContextUtils.isExtractingFeatures(req);
-      final String fvStoreName = SolrQueryRequestContextUtils.getFvStoreName(req);
-      // Check if features are requested and if the model feature store and feature-transform feature store are the same
-      final boolean featuresRequestedFromSameStore = (modelFeatureStoreName.equals(fvStoreName) || fvStoreName == null) ? extractFeatures:false;
-      if (threadManager != null) {
-        threadManager.setExecutor(req.getCore().getCoreContainer().getUpdateShardHandler().getUpdateExecutor());
-      }
-      final LTRScoringQuery scoringQuery = new LTRScoringQuery(ltrScoringModel,
-          extractEFIParams(localParams),
-          featuresRequestedFromSameStore, threadManager);
-
-      // Enable the feature vector caching if we are extracting features, and the features
-      // we requested are the same ones we are reranking with
-      if (featuresRequestedFromSameStore) {
-        scoringQuery.setFeatureLogger( SolrQueryRequestContextUtils.getFeatureLogger(req) );
+
+      LTRScoringQuery[] rerankingQueries = new LTRScoringQuery[modelNames.length];
+      for (int i = 0; i < modelNames.length; i++) {
+        final LTRScoringQuery rerankingQuery;
+        if (!ORIGINAL_RANKING.equals(modelNames[i])) {
+          final LTRScoringModel ltrScoringModel = mr.getModel(modelNames[i]);
+          if (ltrScoringModel == null) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                "cannot find " + LTRQParserPlugin.MODEL + " " + modelNames[i]);
+          }
+          final String modelFeatureStoreName = ltrScoringModel.getFeatureStoreName();
+          final boolean extractFeatures = SolrQueryRequestContextUtils.isExtractingFeatures(req);
+          final String fvStoreName = SolrQueryRequestContextUtils.getFvStoreName(req);        // Check if features are requested and if the model feature store and feature-transform feature store are the same
+          final boolean featuresRequestedFromSameStore = (modelFeatureStoreName.equals(fvStoreName) || fvStoreName == null) ? extractFeatures : false;
+          if (threadManager != null) {
+            threadManager.setExecutor(req.getCore().getCoreContainer().getUpdateShardHandler().getUpdateExecutor());
+          }
+          rerankingQuery = new LTRScoringQuery(ltrScoringModel,
+              extractEFIParams(localParams),
+              featuresRequestedFromSameStore, threadManager);
+
+          // Enable the feature vector caching if we are extracting features, and the features
+          // we requested are the same ones we are reranking with
+          if (featuresRequestedFromSameStore) {
+            rerankingQuery.setFeatureLogger( SolrQueryRequestContextUtils.getFeatureLogger(req) );
+          }
+        }else{
+          rerankingQuery = new LTRScoringQuery(null);
+        }
+
+        // External features
+        rerankingQuery.setRequest(req);
+        rerankingQueries[i] = rerankingQuery;
       }
-      SolrQueryRequestContextUtils.setScoringQuery(req, scoringQuery);
 
+      SolrQueryRequestContextUtils.setScoringQuery(req, rerankingQueries);
       int reRankDocs = localParams.getInt(RERANK_DOCS, DEFAULT_RERANK_DOCS);
       if (reRankDocs <= 0) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Must rerank at least 1 document");
+            "Must rerank at least 1 document");
+      }
+      if (rerankingQueries.length == 1) {
+        return new LTRQuery(rerankingQueries[0], reRankDocs);
+      } else {
+        return new LTRQuery(rerankingQueries, reRankDocs);
       }
-
-      // External features
-      scoringQuery.setRequest(req);
-
-      return new LTRQuery(scoringQuery, reRankDocs);
     }
   }
+
+  public static boolean isOriginalRanking(LTRScoringQuery rerankingQuery){
+    return rerankingQuery.getScoringModel() == null;
+  }
 
   /**
    * A learning to rank Query, will incapsulate a learning to rank model, and delegate to it the rescoring
    * of the documents.
    **/
   public class LTRQuery extends AbstractReRankQuery {
-    private final LTRScoringQuery scoringQuery;
+    private final LTRScoringQuery[] rerankingQueries;
 
-    public LTRQuery(LTRScoringQuery scoringQuery, int reRankDocs) {
-      super(defaultQuery, reRankDocs, new LTRRescorer(scoringQuery));
-      this.scoringQuery = scoringQuery;
+    public LTRQuery(LTRScoringQuery[] rerankingQueries, int rerankDocs) {
+      super(defaultQuery, rerankDocs, new LTRInterleavingRescorer(rerankingQueries));
+      this.rerankingQueries = rerankingQueries;
+    }
+
+    public LTRQuery(LTRScoringQuery rerankingQuery, int rerankDocs) {
+      super(defaultQuery, rerankDocs, new LTRRescorer(rerankingQuery));
+      this.rerankingQueries = new LTRScoringQuery[]{rerankingQuery};

Review comment:
       That's a fair question.
   
   Apart from the subclass considerations already annotated, I see a sort of "structural extensibility" benefit (not sure if that's a good description); let me try to illustrate with two examples:
   
   * example 1: beyond TeamDraftInterleaving: An interleaving algorithm other than `TeamDraftInterleaving` could be supported, as I alluded to in 11.1/n, though of course that would also be possible with the existing class structure.
   
   ```
   - public LTRInterleavingQuery(LTRInterleavingScoringQuery[] rerankingQueries, int rerankDocs) {
   -     super(null /* scoringQuery */, rerankDocs, new LTRInterleavingRescorer(rerankingQueries));
   + public LTRInterleavingQuery(LTRInterleavingScoringQuery[] rerankingQueries, int rerankDocs, Interleaving interleavingAlgorithm) {
   +     super(null /* scoringQuery */, rerankDocs, new LTRInterleavingRescorer(rerankingQueries, interleavingAlgorithm));
   ```
   
   vs.
   
   ```
   - public LTRQuery(LTRScoringQuery[] rerankingQueries, int rerankDocs) {
   -     super(defaultQuery, rerankDocs, new LTRInterleavingRescorer(rerankingQueries));
   + public LTRQuery(LTRScoringQuery[] rerankingQueries, int rerankDocs, Interleaving interleavingAlgorithm) {
   +     super(defaultQuery, rerankDocs, new LTRInterleavingRescorer(rerankingQueries, interleavingAlgorithm));
         this.rerankingQueries = rerankingQueries;
     }
   
     public LTRQuery(LTRScoringQuery rerankingQuery, int rerankDocs) {
         super(defaultQuery, rerankDocs, new LTRRescorer(rerankingQuery));
         this.rerankingQueries = new LTRScoringQuery[]{rerankingQuery};
     }
   ```
   
   You'd know better than me what other interleaving algorithms exist and might be interesting for LTR, and what sort of parameters they might need from `LTRQParserPlugin`. One variant of the current `TeamDraftInterleaving` that I (naively) could see is for the two models to interleave not at the current 50:50 ratio but at (say) an 80:20 ratio.
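   
   To make that 80:20 idea a touch more concrete, here's a rough standalone sketch of a weighted team-draft-style pick (purely hypothetical, not using this PR's classes; `WeightedInterleavingSketch` and its method names are made up for illustration): at each step the next document comes from model A with probability `weightA`, e.g. `0.8` for an 80:20 split.
   
   ```
   import java.util.ArrayList;
   import java.util.HashSet;
   import java.util.List;
   import java.util.Random;
   import java.util.Set;
   
   import org.apache.lucene.search.ScoreDoc;
   
   // Hypothetical sketch only: interleaves two already-reranked lists so that
   // (roughly) a weightA fraction of the picks come from model A.
   public class WeightedInterleavingSketch {
   
     public static ScoreDoc[] interleave(ScoreDoc[] rerankedA, ScoreDoc[] rerankedB,
         double weightA, Random random) {
       final int size = Math.min(rerankedA.length, rerankedB.length);
       final List<ScoreDoc> interleaved = new ArrayList<>(size);
       final Set<Integer> alreadyAdded = new HashSet<>();
       int indexA = 0;
       int indexB = 0;
       while (interleaved.size() < size
           && (indexA < rerankedA.length || indexB < rerankedB.length)) {
         // weighted coin decides which model contributes the next document
         final boolean pickFromA = (indexB >= rerankedB.length)
             || (indexA < rerankedA.length && random.nextDouble() < weightA);
         if (pickFromA) {
           indexA = addNextNotYetPicked(rerankedA, indexA, interleaved, alreadyAdded);
         } else {
           indexB = addNextNotYetPicked(rerankedB, indexB, interleaved, alreadyAdded);
         }
       }
       return interleaved.toArray(new ScoreDoc[0]);
     }
   
     // skips documents already taken from the other list, then appends the next one
     private static int addNextNotYetPicked(ScoreDoc[] reranked, int index,
         List<ScoreDoc> interleaved, Set<Integer> alreadyAdded) {
       while (index < reranked.length && alreadyAdded.contains(reranked[index].doc)) {
         index++;
       }
       if (index < reranked.length) {
         interleaved.add(reranked[index]);
         alreadyAdded.add(reranked[index].doc);
         index++;
       }
       return index;
     }
   }
   ```
   
   An actual implementation would presumably also record which model each picked document came from (like the `pickedInterleavingDocIds` state mentioned further down) so that user interactions can later be attributed to the right model.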
   
   
   * example 2: beyond Interleaving: we've got one model and we've got interleaving of two models; might there be other ways of combining models, or of rescoring? I don't know what these might be: choose a document only if both models picked it? Choose a document only if at least 2 of 3 models picked it? Hmm, maybe those are just variants of interleaving. Could one model rescore the documents already scored by another model?
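   
   As a throwaway illustration of the "both models picked it" idea (again a purely hypothetical sketch, nothing from this PR; `IntersectionCombinerSketch` is a made-up name):
   
   ```
   import java.util.ArrayList;
   import java.util.HashSet;
   import java.util.List;
   import java.util.Set;
   
   import org.apache.lucene.search.ScoreDoc;
   
   // Hypothetical sketch only: keep a document in the final list only if it
   // appears in the reranked output of both models, preserving model A's order.
   public class IntersectionCombinerSketch {
   
     public static ScoreDoc[] combine(ScoreDoc[] rerankedA, ScoreDoc[] rerankedB) {
       final Set<Integer> pickedByB = new HashSet<>();
       for (ScoreDoc scoreDoc : rerankedB) {
         pickedByB.add(scoreDoc.doc);
       }
       final List<ScoreDoc> combined = new ArrayList<>();
       for (ScoreDoc scoreDoc : rerankedA) {
         if (pickedByB.contains(scoreDoc.doc)) {
           combined.add(scoreDoc);
         }
       }
       return combined.toArray(new ScoreDoc[0]);
     }
   }
   ```
   
   Whether something like that would be useful in practice is a separate question; it's just to show the kind of thing a non-interleaving combiner might do.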
   
   Anyhow, whatever the shiny new thing is, there'd be an existing `LTRQuery/LTRScoringQuery/LTRRescorer` + `LTRInterleavingQuery/LTRInterleavingScoringQuery/LTRInterleavingRescorer` pattern that could then be repeated as `LTRShinyNewQuery/LTRShinyNewScoringQuery/LTRShinyNewRescorer`, and whilst `LTRInterleaving*` required only the `pickedInterleavingDocIds` as state within `LTR[Interleaving]ScoringQuery`, perhaps `LTRShinyNew*` would require something more complex.
   
   ----
   
   When you say _"there is a little bit of code we are adding around"_ I agree: yes, many of the methods now exist twice, albeit in slightly different forms.
   * Is your concern about the repetition as such, and/or about the growth of the `LTRQParserPlugin` class in which the `LTR[Interleaving]Query` classes are inner classes?
   * It appears that the inner classes are not _but could be_ static inner classes, and therefore it should also be possible to promote them out of `LTRQParserPlugin` into classes in their own right, which would make `LTRQParserPlugin` shorter (rough sketch below).
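   
   As a rough illustration of that second point, and assuming the inner classes only need static members of `LTRQParserPlugin` (e.g. `defaultQuery`), the first mechanical step might simply be:
   
   ```
   - public class LTRQuery extends AbstractReRankQuery {
   + public static class LTRQuery extends AbstractReRankQuery {
   ```
   
   After that, moving each class out into its own file (say a top-level `LTRQuery.java` alongside `LTRQParserPlugin.java`) would be a further mechanical refactor.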



