EBernhardson has uploaded a new change for review.

  https://gerrit.wikimedia.org/r/324852

Change subject: [WIP] Port learn to rank from solr cloud
......................................................................

[WIP] Port learn to rank from solr cloud

https://issues.apache.org/jira/browse/SOLR-8542

This does not work yet in any way, but it does compile (without the
tests...). REST APIs still need to be implemented for uploading models.
MultipleAdditiveTrees needs to be ported to use RankLib LambdaMART
models. The model and feature stores need to be properly managed within
the Elasticsearch lifecycle. Probably lots more.

This might also make more sense in a separate repository, but for the
moment this was an easy place to start working out what might be
possible.
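
For reference, the query DSL this would expose (a sketch based on
LTRScoringQueryParser as written here; "enwiki-ltr" is a hypothetical
model name that would need to already exist in the node's model store):

  {
    "ltr": {
      "model": "enwiki-ltr",
      "extract_all_features": false
    }
  }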

Change-Id: I00c499c207ba7de05b396c928d7c1c172ac72431
---
M src/main/java/org/wikimedia/search/extra/ExtraPlugin.java
A src/main/java/org/wikimedia/search/extra/ltr/.LTRScoringQueryBuilder.java.swp
A src/main/java/org/wikimedia/search/extra/ltr/DocInfo.java
A src/main/java/org/wikimedia/search/extra/ltr/FeatureLogger.java
A src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQuery.java
A src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQueryBuilder.java
A src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQueryParser.java
A src/main/java/org/wikimedia/search/extra/ltr/feature/Feature.java
A src/main/java/org/wikimedia/search/extra/ltr/feature/FeatureException.java
A src/main/java/org/wikimedia/search/extra/ltr/feature/FieldLengthFeature.java
A src/main/java/org/wikimedia/search/extra/ltr/feature/FieldValueFeature.java
A src/main/java/org/wikimedia/search/extra/ltr/feature/ValueFeature.java
A src/main/java/org/wikimedia/search/extra/ltr/feature/package-info.java
A src/main/java/org/wikimedia/search/extra/ltr/model/LTRScoringModel.java
A src/main/java/org/wikimedia/search/extra/ltr/model/LinearModel.java
A src/main/java/org/wikimedia/search/extra/ltr/model/ModelException.java
A src/main/java/org/wikimedia/search/extra/ltr/model/package-info.java
A src/main/java/org/wikimedia/search/extra/ltr/norm/IdentityNormalizer.java
A src/main/java/org/wikimedia/search/extra/ltr/norm/MinMaxNormalizer.java
A src/main/java/org/wikimedia/search/extra/ltr/norm/Normalizer.java
A src/main/java/org/wikimedia/search/extra/ltr/norm/NormalizerException.java
A src/main/java/org/wikimedia/search/extra/ltr/norm/StandardNormalizer.java
A src/main/java/org/wikimedia/search/extra/ltr/norm/package-info.java
A src/main/java/org/wikimedia/search/extra/ltr/package-info.java
A src/main/java/org/wikimedia/search/extra/ltr/store/FeatureStore.java
A src/main/java/org/wikimedia/search/extra/ltr/store/LTRStoreService.java
A src/main/java/org/wikimedia/search/extra/ltr/store/ModelStore.java
A src/main/java/org/wikimedia/search/extra/ltr/store/package-info.java
28 files changed, 2,839 insertions(+), 1 deletion(-)


  git pull ssh://gerrit.wikimedia.org:29418/search/extra refs/changes/52/324852/1

diff --git a/src/main/java/org/wikimedia/search/extra/ExtraPlugin.java b/src/main/java/org/wikimedia/search/extra/ExtraPlugin.java
index 7e993db..c19e5f0 100644
--- a/src/main/java/org/wikimedia/search/extra/ExtraPlugin.java
+++ b/src/main/java/org/wikimedia/search/extra/ExtraPlugin.java
@@ -14,6 +14,8 @@
 import org.wikimedia.search.extra.analysis.filters.PreserveOriginalFilterFactory;
 import org.wikimedia.search.extra.fuzzylike.FuzzyLikeThisQueryParser;
 import org.wikimedia.search.extra.idhashmod.IdHashModQueryParser;
+//import org.wikimedia.search.extra.ltr.LTRScoringQueryParser;
+//import org.wikimedia.search.extra.ltr.store.LTRStoreService;
 import org.wikimedia.search.extra.levenshtein.LevenshteinDistanceScoreParser;
 import org.wikimedia.search.extra.regex.SourceRegexQueryParser;
 import org.wikimedia.search.extra.superdetectnoop.ChangeHandler;
@@ -43,6 +45,7 @@
         module.registerQueryParser(SourceRegexQueryParser.class);
         module.registerQueryParser(IdHashModQueryParser.class);
         module.registerQueryParser(FuzzyLikeThisQueryParser.class);
+        //module.registerQueryParser(LTRScoringQueryParser.class);
     }
 
     /**
@@ -70,7 +73,9 @@
 
     @Override
     public Collection<Module> nodeModules() {
-        return Collections.<Module>singleton(new CloseEnoughDetectorsModule());
+        return Collections.<Module>singleton(
+            new CloseEnoughDetectorsModule());
+            //new LTRStoreModule());
     }
 
     public static class CloseEnoughDetectorsModule extends AbstractModule {
@@ -84,4 +89,11 @@
             handlers.addBinding().toInstance(new SetHandler.Recognizer());
         }
     }
+
+    public static class LTRStoreModule extends AbstractModule {
+        @Override
+        protected void configure() {
+            //bind(LTRStoreService.class).asEagerSingleton();
+        }
+    }
 }
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/.LTRScoringQueryBuilder.java.swp b/src/main/java/org/wikimedia/search/extra/ltr/.LTRScoringQueryBuilder.java.swp
new file mode 100644
index 0000000..51b0001
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/.LTRScoringQueryBuilder.java.swp
Binary files differ
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/DocInfo.java b/src/main/java/org/wikimedia/search/extra/ltr/DocInfo.java
new file mode 100644
index 0000000..09427ed
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/DocInfo.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr;
+
+import java.util.HashMap;
+
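+/**
+ * Per-document scratch space shared between the original scorer and the
+ * reranking features; currently used to carry the original document score.
+ */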
+public class DocInfo extends HashMap<String,Object> {
+
+  // Name of key used to store the original score of a doc
+  private static final String ORIGINAL_DOC_SCORE = "ORIGINAL_DOC_SCORE";
+
+  public DocInfo() {
+    super();
+  }
+
+  public void setOriginalDocScore(Float score) {
+    put(DocInfo.ORIGINAL_DOC_SCORE, score);
+  }
+
+  public Float getOriginalDocScore() {
+    return (Float)get(DocInfo.ORIGINAL_DOC_SCORE);
+  }
+
+  public boolean hasOriginalDocScore() {
+    return containsKey(DocInfo.ORIGINAL_DOC_SCORE);
+  }
+
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/FeatureLogger.java b/src/main/java/org/wikimedia/search/extra/ltr/FeatureLogger.java
new file mode 100644
index 0000000..8ee95a8
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/FeatureLogger.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.wikimedia.search.extra.ltr.LTRScoringQuery.FeatureInfo;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+
+/**
+ * FeatureLogger can be registered in a model and provide a strategy for logging
+ * the feature values.
+ */
+public abstract class FeatureLogger<FV_TYPE> {
+
+  private static final ESLogger log = ESLoggerFactory.getLogger(FeatureLogger.class.getName());
+
+  /** the name of the cache using for storing the feature value **/
+  private static final String QUERY_FV_CACHE_NAME = "QUERY_DOC_FV";
+
+  protected enum FeatureFormat {DENSE, SPARSE};
+  protected final FeatureFormat featureFormat;
+  
+  protected FeatureLogger(FeatureFormat f) {
+    this.featureFormat = f;
+  }
+  
+  /**
+   * Log will be called every time that the model generates the feature values
+   * for a document and a query.
+   *
+   * @param docid
+   *          Lucene document id whose features we are saving
+   * @param featuresInfo
+   *          List of all the FeatureInfo objects which contain name and value
+   *          for all the features triggered by the result set
+   * @return true if the logger successfully logged the features, false
+   *         otherwise.
+   */
+
+  public boolean log(int docid, LTRScoringQuery scoringQuery,
+      FeatureInfo[] featuresInfo) {
+    final FV_TYPE featureVector = makeFeatureVector(featuresInfo);
+    if (featureVector == null) {
+      return false;
+    }
+
+    /**
+    return searcher.cacheInsert(QUERY_FV_CACHE_NAME,
+        fvCacheKey(scoringQuery, docid), featureVector) != null;
+    */
+    return false;
+  }
+
+  /**
+   * Returns a FeatureLogger that logs the features, using the format
+   * specified in the 'stringFormat' param: 'csv' will log the features as a
+   * single string in CSV format; 'json' will log the features as a Map of
+   * featureName keys to featureValue values. If 'stringFormat' is null or
+   * empty, CSV format will be selected.
+   * The 'featureFormat' param: 'dense' will write features in dense format,
+   * 'sparse' will write the features in sparse format; null or empty will
+   * default to 'sparse'.
+   *
+   * @return a feature logger for the format specified.
+   */
+  public static FeatureLogger<?> createFeatureLogger(String stringFormat, String featureFormat) {
+    final FeatureFormat f;
+    if (featureFormat == null || featureFormat.isEmpty() ||
+        featureFormat.equals("sparse")) {
+      f = FeatureFormat.SPARSE;
+    }
+    else if (featureFormat.equals("dense")) {
+      f = FeatureFormat.DENSE;
+    }
+    else {
+      f = FeatureFormat.SPARSE;
+      log.warn("unknown feature logger feature format {} | {}", stringFormat, 
featureFormat);
+    }
+    if ((stringFormat == null) || stringFormat.isEmpty()) {
+      return new CSVFeatureLogger(f);
+    }
+    if (stringFormat.equals("csv")) {
+      return new CSVFeatureLogger(f);
+    }
+    if (stringFormat.equals("json")) {
+      return new MapFeatureLogger(f);
+    }
+    log.warn("unknown feature logger string format {} | {}", stringFormat, 
featureFormat);
+    return null;
+
+  }
+
+  public abstract FV_TYPE makeFeatureVector(FeatureInfo[] featuresInfo);
+
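+  // Cache key mixes the query identity with the doc id so that feature
+  // vectors cached for one (query, doc) pair are not returned for another.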
+  private static int fvCacheKey(LTRScoringQuery scoringQuery, int docid) {
+    return  scoringQuery.hashCode() + (31 * docid);
+  }
+
+  /**
+   * Retrieves the cached feature vector for a document and query, if any.
+   *
+   * @param docid
+   *          Lucene document id
+   * @return representation of the list of features calculated for docid
+   */
+  
+  public FV_TYPE getFeatureVector(int docid, LTRScoringQuery scoringQuery) {
+    /**
+    return (FV_TYPE) searcher.cacheLookup(QUERY_FV_CACHE_NAME, 
fvCacheKey(scoringQuery, docid));
+    */
+    return null;
+  }
+
+
+  public static class MapFeatureLogger extends FeatureLogger<Map<String,Float>> {
+
+    public MapFeatureLogger(FeatureFormat f) {
+      super(f);
+    }
+    
+    @Override
+    public Map<String,Float> makeFeatureVector(FeatureInfo[] featuresInfo) {
+      boolean isDense = featureFormat.equals(FeatureFormat.DENSE);
+      Map<String,Float> hashmap = Collections.emptyMap();
+      if (featuresInfo.length > 0) {
+        hashmap = new HashMap<String,Float>(featuresInfo.length);
+        for (FeatureInfo featInfo:featuresInfo){ 
+          if (featInfo.isUsed() || isDense){
+            hashmap.put(featInfo.getName(), featInfo.getValue());
+          }
+        }
+      }
+      return hashmap;
+    }
+
+  }
+
+  public static class CSVFeatureLogger extends FeatureLogger<String> {
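+    // The builder is reused across calls and reset at the end of
+    // makeFeatureVector, so a CSVFeatureLogger instance is not thread-safe.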
+    StringBuilder sb = new StringBuilder(500);
+    char keyValueSep = ':';
+    char featureSep = ';';
+    
+    public CSVFeatureLogger(FeatureFormat f) {
+      super(f);
+    }
+
+    public CSVFeatureLogger setKeyValueSep(char keyValueSep) {
+      this.keyValueSep = keyValueSep;
+      return this;
+    }
+
+    public CSVFeatureLogger setFeatureSep(char featureSep) {
+      this.featureSep = featureSep;
+      return this;
+    }
+
+    @Override
+    public String makeFeatureVector(FeatureInfo[] featuresInfo) {
+      boolean isDense = featureFormat.equals(FeatureFormat.DENSE);
+      for (FeatureInfo featInfo:featuresInfo) {
+          if (featInfo.isUsed() || isDense){
+             sb.append(featInfo.getName()).append(keyValueSep)
+                 .append(featInfo.getValue());
+             sb.append(featureSep);
+          }
+      }
+
+      final String features = (sb.length() > 0 ? sb.substring(0,
+          sb.length() - 1) : "");
+      sb.setLength(0);
+
+      return features;
+    }
+
+  }
+
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQuery.java b/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQuery.java
new file mode 100644
index 0000000..4189207
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQuery.java
@@ -0,0 +1,644 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.RunnableFuture;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DisiPriorityQueue;
+import org.apache.lucene.search.DisiWrapper;
+import org.apache.lucene.search.DisjunctionDISIApproximation;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.wikimedia.search.extra.ltr.feature.Feature;
+import org.wikimedia.search.extra.ltr.feature.Feature.FeatureWeight;
+import org.wikimedia.search.extra.ltr.feature.Feature.FeatureWeight.FeatureScorer;
+import org.wikimedia.search.extra.ltr.model.LTRScoringModel;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+
+/**
+ * The ranking query that is run, reranking results using the
+ * LTRScoringModel algorithm
+ */
+public class LTRScoringQuery extends Query {
+
+  private static final int CLASS_NAME_HASH = LTRScoringQuery.class.getName().hashCode();
+  private static final ESLogger log = ESLoggerFactory.getLogger(LTRScoringQuery.class.getName());
+
+  // contains a description of the model
+  final private LTRScoringModel ltrScoringModel;
+  final private boolean extractAllFeatures;
+
+  // feature logger to output the features.
+  protected FeatureLogger<?> fl;
+  // Map of external parameters, such as query intent, that can be used by
+  // features
+  protected final Map<String,String[]> efi;
+
+  public LTRScoringQuery(LTRScoringModel ltrScoringModel) {
+    this(ltrScoringModel, Collections.<String,String[]>emptyMap(), false);
+  }
+
+  public LTRScoringQuery(LTRScoringModel ltrScoringModel, boolean extractAllFeatures) {
+    this(ltrScoringModel, Collections.<String, String[]>emptyMap(), extractAllFeatures);
+  }
+
+  public LTRScoringQuery(LTRScoringModel ltrScoringModel, 
+      Map<String, String[]> externalFeatureInfo, 
+      boolean extractAllFeatures) {
+    this.ltrScoringModel = ltrScoringModel;
+    this.efi = externalFeatureInfo;
+    this.extractAllFeatures = extractAllFeatures;
+  }
+
+  public LTRScoringModel getScoringModel() {
+    return ltrScoringModel;
+  }
+
+  public void setFeatureLogger(FeatureLogger fl) {
+    this.fl = fl;
+  }
+
+  public FeatureLogger getFeatureLogger() {
+    return fl;
+  }
+
+  public Map<String,String[]> getExternalFeatureInfo() {
+    return efi;
+  }
+
+  
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = CLASS_NAME_HASH;
+    result = (prime * result) + ((ltrScoringModel == null) ? 0 : ltrScoringModel.hashCode());
+    if (efi == null) {
+      result = (prime * result) + 0;
+    }
+    else {
+      for (final Entry<String,String[]> entry : efi.entrySet()) {
+        final String key = entry.getKey();
+        final String[] values = entry.getValue();
+        result = (prime * result) + key.hashCode();
+        result = (prime * result) + Arrays.hashCode(values);
+      }
+    }
+    result = (prime * result) + this.toString().hashCode();
+    return result;
+  }
+  @Override
+  public boolean equals(Object o) {
+    return o != null && getClass().isAssignableFrom(o.getClass()) && equalsTo(getClass().cast(o));
+  }
+
+  private boolean equalsTo(LTRScoringQuery other) {
+    if (ltrScoringModel == null) {
+      if (other.ltrScoringModel != null) {
+        return false;
+      }
+    } else if (!ltrScoringModel.equals(other.ltrScoringModel)) {
+      return false;
+    }
+    if (efi == null) {
+      if (other.efi != null) {
+        return false;
+      }
+    } else {
+      if (other.efi == null || efi.size() != other.efi.size()) {
+        return false;
+      }
+      for(final Entry<String,String[]> entry : efi.entrySet()) {
+        final String key = entry.getKey();
+        final String[] otherValues = other.efi.get(key);
+        if (otherValues == null || !Arrays.equals(otherValues,entry.getValue())) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  @Override
+  public ModelWeight createWeight(IndexSearcher searcher, boolean needsScores)
+      throws IOException {   
+    final Collection<Feature> modelFeatures = ltrScoringModel.getFeatures();
+    final Collection<Feature> allFeatures = ltrScoringModel.getAllFeatures();
+    int modelFeatSize = modelFeatures.size();
+   
+    Collection<Feature> features = null;
+    if (this.extractAllFeatures) {
+      features = allFeatures;
+    }
+    else{
+      features =  modelFeatures;
+    }
+    final FeatureWeight[] extractedFeatureWeights = new FeatureWeight[features.size()];
+    final FeatureWeight[] modelFeaturesWeights = new FeatureWeight[modelFeatSize];
+    List<FeatureWeight> featureWeights = new ArrayList<>(features.size());
+    
+    createWeights(searcher, needsScores, featureWeights, features);
+    int i=0, j = 0;
+    if (this.extractAllFeatures) {
+      for (final FeatureWeight fw : featureWeights) {
+        extractedFeatureWeights[i++] = fw;
+      }
+      for (final Feature f : modelFeatures){
+        // we can look up by feature id because all features will be extracted
+        // when this.extractAllFeatures is set
+        modelFeaturesWeights[j++] = extractedFeatureWeights[f.getIndex()];
+      }
+    }
+    else{
+      for (final FeatureWeight fw: featureWeights){
+        extractedFeatureWeights[i++] = fw;
+        modelFeaturesWeights[j++] = fw; 
+      }
+    }
+    return new ModelWeight(searcher, modelFeaturesWeights, extractedFeatureWeights, allFeatures.size());
+  }
+
+  private void createWeights(IndexSearcher searcher, boolean needsScores,
+      List<FeatureWeight> featureWeights, Collection<Feature> features) throws IOException {
+    // since the feature store is a linkedhashmap order is preserved
+    for (final Feature f : features) {
+      try {
+        FeatureWeight fw = f.createWeight(searcher, needsScores, efi);
+        featureWeights.add(fw);
+      } catch (final Exception e) {
+        throw new RuntimeException("Exception from createWeight for " + f.toString() + " "
+            + e.getMessage(), e);
+      }
+    }
+  }
+
+  @Override
+  public String toString(String field) {
+    return field;
+  }
+
+  public class FeatureInfo {
+    String name;
+    float value;
+    boolean used;
+
+    FeatureInfo(String n, float v, boolean u){
+      name = n; value = v; used = u; 
+    }
+
+    public void setScore(float score){
+      this.value = score;
+    }
+
+    public String getName(){
+      return name;
+    }
+
+    public float getValue(){
+      return value;
+    }
+
+    public boolean isUsed(){
+      return used;
+    }
+
+    public void setUsed(boolean used){
+      this.used = used;
+    }
+  }
+
+  public class ModelWeight extends Weight {
+
+    IndexSearcher searcher;
+
+    // List of the model's features used for scoring. This is a subset of the
+    // features used for logging.
+    FeatureWeight[] modelFeatureWeights;
+    float[] modelFeatureValuesNormalized;
+    FeatureWeight[] extractedFeatureWeights;
+
+    // List of all the feature names, values - used for both scoring and logging
+    /*
+     *  What is the advantage of using a hashmap here instead of an array of objects?
+     *     A set of arrays was used earlier and the elements were accessed using the featureId.
+     *     With the updated logic to create weights selectively,
+     *     the number of elements in the array can be fewer than the total number of features.
+     *     When [features] are not requested, only the model features are extracted.
+     *     In this case, the indexing by featureId fails. For this reason, we need
+     *     a map which holds just the features that were triggered by the documents
+     *     in the result set.
+     */
+    FeatureInfo[] featuresInfo;
+    /*
+     * @param modelFeatureWeights
+     *     - should be the same size as the number of features used by the model
+     * @param extractedFeatureWeights
+     *     - if features are requested from the same store as the model feature store,
+     *       this will be the size of the total number of features in the model feature store;
+     *       else, this will be the size of the modelFeatureWeights
+     * @param allFeaturesSize
+     *     - total number of features in the feature store used by this model
+     */
+    public ModelWeight(IndexSearcher searcher, FeatureWeight[] modelFeatureWeights,
+        FeatureWeight[] extractedFeatureWeights, int allFeaturesSize) {
+      super(LTRScoringQuery.this);
+      this.searcher = searcher;
+      this.extractedFeatureWeights = extractedFeatureWeights;
+      this.modelFeatureWeights = modelFeatureWeights;
+      this.modelFeatureValuesNormalized = new float[modelFeatureWeights.length];
+      this.featuresInfo = new FeatureInfo[allFeaturesSize];
+      setFeaturesInfo();
+    }
+
+    private void setFeaturesInfo(){
+      for (int i = 0; i < extractedFeatureWeights.length;++i){
+        String featName = extractedFeatureWeights[i].getName();
+        int featId = extractedFeatureWeights[i].getIndex();
+        float value = extractedFeatureWeights[i].getDefaultValue();
+        featuresInfo[featId] = new FeatureInfo(featName,value,false);
+      } 
+    }
+
+    public FeatureInfo[] getFeaturesInfo(){
+      return featuresInfo;
+    }
+
+    @Override
+    public void normalize(float norm, float boost) {
+        // Intentionally ignored
+    }
+
+    public float getValueForNormalization() throws IOException {
+        return 1.0f;
+    }
+
+    /**
+     * Goes through all the stored feature values, and calculates the normalized
+     * values for all the features that will be used for scoring.
+     */
+    private void makeNormalizedFeatures() {
+      int pos = 0;
+      for (final FeatureWeight feature : modelFeatureWeights) {
+        final int featureId = feature.getIndex();
+        FeatureInfo fInfo = featuresInfo[featureId];
+        if (fInfo.isUsed()) { // not checking for fInfo == null as that would be a bug we should catch
+          modelFeatureValuesNormalized[pos] = fInfo.getValue();
+        } else {
+          modelFeatureValuesNormalized[pos] = feature.getDefaultValue();
+        }
+        pos++;
+      }
+      ltrScoringModel.normalizeFeaturesInPlace(modelFeatureValuesNormalized);
+    }
+
+    @Override
+    public Explanation explain(LeafReaderContext context, int doc)
+        throws IOException {
+
+      final Explanation[] explanations = new Explanation[this.featuresInfo.length];
+      for (final FeatureWeight feature : extractedFeatureWeights) {
+        explanations[feature.getIndex()] = feature.explain(context, doc);
+      }
+      final List<Explanation> featureExplanations = new ArrayList<>();
+      for (int idx = 0 ;idx < modelFeatureWeights.length; ++idx) {
+        final FeatureWeight f = modelFeatureWeights[idx]; 
+        Explanation e = ltrScoringModel.getNormalizerExplanation(explanations[f.getIndex()], idx);
+        featureExplanations.add(e);
+      }
+      final ModelScorer bs = scorer(context);
+      bs.iterator().advance(doc);
+
+      final float finalScore = bs.score();
+
+      return ltrScoringModel.explain(context, doc, finalScore, featureExplanations);
+
+    }
+
+    @Override
+    public void extractTerms(Set<Term> terms) {
+      for (final FeatureWeight feature : extractedFeatureWeights) {
+        feature.extractTerms(terms);
+      }
+    }
+
+    protected void reset() {
+      for (int i = 0; i < extractedFeatureWeights.length;++i){
+        int featId = extractedFeatureWeights[i].getIndex();
+        float value = extractedFeatureWeights[i].getDefaultValue();
+        // need to set the default value every time, as the default value is
+        // used in 'dense' mode even if used=false
+        featuresInfo[featId].setScore(value);
+        featuresInfo[featId].setUsed(false);
+      }
+    }
+
+    @Override
+    public ModelScorer scorer(LeafReaderContext context) throws IOException {
+
+      final List<FeatureScorer> featureScorers = new ArrayList<FeatureScorer>(
+          extractedFeatureWeights.length);
+      for (final FeatureWeight featureWeight : extractedFeatureWeights) {
+        final FeatureScorer scorer = featureWeight.scorer(context);
+        if (scorer != null) {
+          featureScorers.add(scorer);
+        }
+      }
+      // Always return a ModelScorer, even if no features match, because we
+      // always need to call score on the model for every document, since
+      // zero matching features could still return a non-zero score for a
+      // given model.
+      ModelScorer mscorer = new ModelScorer(this, featureScorers);
+      return mscorer;
+
+    }
+
+    public class ModelScorer extends Scorer {
+      final private DocInfo docInfo;
+      final private Scorer featureTraversalScorer;
+
+      public DocInfo getDocInfo() {
+        return docInfo;
+      }
+
+      public ModelScorer(Weight weight, List<FeatureScorer> featureScorers) {
+        super(weight);
+        docInfo = new DocInfo();
+        for (final FeatureScorer subScorer : featureScorers) {
+          subScorer.setDocInfo(docInfo);
+        }
+        if (featureScorers.size() <= 1) {
+          // TODO: Allow the use of dense features in other cases
+          featureTraversalScorer = new DenseModelScorer(weight, featureScorers);
+        } else {
+          featureTraversalScorer = new SparseModelScorer(weight, featureScorers);
+        }
+      }
+
+      @Override
+      public Collection<ChildScorer> getChildren() {
+        return featureTraversalScorer.getChildren();
+      }
+
+      @Override
+      public int docID() {
+        return featureTraversalScorer.docID();
+      }
+
+      @Override
+      public float score() throws IOException {
+        return featureTraversalScorer.score();
+      }
+
+      @Override
+      public int freq() throws IOException {
+        return featureTraversalScorer.freq();
+      }
+
+      @Override
+      public DocIdSetIterator iterator() {
+        return featureTraversalScorer.iterator();
+      }
+
+      public class SparseModelScorer extends Scorer {
+        protected DisiPriorityQueue subScorers;
+        protected ScoringQuerySparseIterator itr;
+
+        protected int targetDoc = -1;
+        protected int activeDoc = -1;
+
+        protected SparseModelScorer(Weight weight,
+            List<FeatureScorer> featureScorers) {
+          super(weight);
+          if (featureScorers.size() <= 1) {
+            throw new IllegalArgumentException(
+                "There must be at least 2 subScorers");
+          }
+          subScorers = new DisiPriorityQueue(featureScorers.size());
+          for (final Scorer scorer : featureScorers) {
+            final DisiWrapper w = new DisiWrapper(scorer);
+            subScorers.add(w);
+          }
+
+          itr = new ScoringQuerySparseIterator(subScorers);
+        }
+
+        @Override
+        public int docID() {
+          return itr.docID();
+        }
+
+        @Override
+        public float score() throws IOException {
+          final DisiWrapper topList = subScorers.topList();
+          // If the target doc we wanted to advance to matches the actual doc
+          // the underlying features advanced to, perform the feature
+          // calculations; otherwise just continue with the model's scoring
+          // process with empty features.
+          reset();
+          if (activeDoc == targetDoc) {
+            for (DisiWrapper w = topList; w != null; w = w.next) {
+              final Scorer subScorer = w.scorer;
+              FeatureWeight scFW = (FeatureWeight) subScorer.getWeight();
+              final int featureId = scFW.getIndex();
+              featuresInfo[featureId].setScore(subScorer.score());
+              featuresInfo[featureId].setUsed(true);
+            }
+          }
+          makeNormalizedFeatures();
+          return ltrScoringModel.score(modelFeatureValuesNormalized);
+        }
+
+        @Override
+        public int freq() throws IOException {
+          final DisiWrapper subMatches = subScorers.topList();
+          int freq = 1;
+          for (DisiWrapper w = subMatches.next; w != null; w = w.next) {
+            freq += 1;
+          }
+          return freq;
+        }
+
+        @Override
+        public DocIdSetIterator iterator() {
+          return itr;
+        }
+
+        @Override
+        public final Collection<ChildScorer> getChildren() {
+          final ArrayList<ChildScorer> children = new ArrayList<>();
+          for (final DisiWrapper scorer : subScorers) {
+            children.add(new ChildScorer(scorer.scorer, "SHOULD"));
+          }
+          return children;
+        }
+
+        protected class ScoringQuerySparseIterator extends DisjunctionDISIApproximation {
+
+          public ScoringQuerySparseIterator(DisiPriorityQueue subIterators) {
+            super(subIterators);
+          }
+
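+          // The rerank iterator visits every candidate doc in order (note the
+          // ++targetDoc in nextDoc), even when no feature matched it; activeDoc
+          // tracks where the underlying feature disjunction actually is.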
+          @Override
+          public final int nextDoc() throws IOException {
+            if (activeDoc == targetDoc) {
+              activeDoc = super.nextDoc();
+            } else if (activeDoc < targetDoc) {
+              activeDoc = super.advance(targetDoc + 1);
+            }
+            return ++targetDoc;
+          }
+
+          @Override
+          public final int advance(int target) throws IOException {
+            // If target doc we wanted to advance to matches the actual doc
+            // the underlying features advanced to, perform the feature
+            // calculations,
+            // otherwise just continue with the model's scoring process with
+            // empty features.
+            if (activeDoc < target) {
+              activeDoc = super.advance(target);
+            }
+            targetDoc = target;
+            return targetDoc;
+          }
+        }
+
+      }
+
+      public class DenseModelScorer extends Scorer {
+        int activeDoc = -1; // The doc that our scorers are actually at
+        int targetDoc = -1; // The doc we were most recently told to go to
+        int freq = -1;
+        List<FeatureScorer> featureScorers;
+
+        protected DenseModelScorer(Weight weight,
+            List<FeatureScorer> featureScorers) {
+          super(weight);
+          this.featureScorers = featureScorers;
+        }
+
+        @Override
+        public int docID() {
+          return targetDoc;
+        }
+
+        @Override
+        public float score() throws IOException {
+          reset();
+          freq = 0;
+          if (targetDoc == activeDoc) {
+            for (final Scorer scorer : featureScorers) {
+              if (scorer.docID() == activeDoc) {
+                freq++;
+                FeatureWeight scFW = (FeatureWeight) scorer.getWeight();
+                final int featureId = scFW.getIndex();
+                featuresInfo[featureId].setScore(scorer.score());
+                featuresInfo[featureId].setUsed(true);
+              }
+            }
+          }
+          makeNormalizedFeatures();
+          return ltrScoringModel.score(modelFeatureValuesNormalized);
+        }
+
+        @Override
+        public final Collection<ChildScorer> getChildren() {
+          final ArrayList<ChildScorer> children = new ArrayList<>();
+          for (final Scorer scorer : featureScorers) {
+            children.add(new ChildScorer(scorer, "SHOULD"));
+          }
+          return children;
+        }
+
+        @Override
+        public int freq() throws IOException {
+          return freq;
+        }
+
+        @Override
+        public DocIdSetIterator iterator() {
+          return new DenseIterator();
+        }
+
+        class DenseIterator extends DocIdSetIterator {
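+          // Steps targetDoc forward one doc at a time while keeping activeDoc
+          // at the minimum position across all feature scorers; score() only
+          // reads feature values when the two line up.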
+
+          @Override
+          public int docID() {
+            return targetDoc;
+          }
+
+          @Override
+          public int nextDoc() throws IOException {
+            if (activeDoc <= targetDoc) {
+              activeDoc = NO_MORE_DOCS;
+              for (final Scorer scorer : featureScorers) {
+                if (scorer.docID() != NO_MORE_DOCS) {
+                  activeDoc = Math.min(activeDoc, scorer.iterator().nextDoc());
+                }
+              }
+            }
+            return ++targetDoc;
+          }
+
+          @Override
+          public int advance(int target) throws IOException {
+            if (activeDoc < target) {
+              activeDoc = NO_MORE_DOCS;
+              for (final Scorer scorer : featureScorers) {
+                if (scorer.docID() != NO_MORE_DOCS) {
+                  activeDoc = Math.min(activeDoc,
+                      scorer.iterator().advance(target));
+                }
+              }
+            }
+            targetDoc = target;
+            return target;
+          }
+
+          @Override
+          public long cost() {
+            long sum = 0;
+            for (int i = 0; i < featureScorers.size(); i++) {
+              sum += featureScorers.get(i).iterator().cost();
+            }
+            return sum;
+          }
+
+        }
+      }
+    }
+  }
+
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQueryBuilder.java b/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQueryBuilder.java
new file mode 100644
index 0000000..2d24ed7
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQueryBuilder.java
@@ -0,0 +1,38 @@
+package org.wikimedia.search.extra.ltr;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+
+import java.io.IOException;
+
+public class LTRScoringQueryBuilder extends QueryBuilder {
+    private String modelName = null;
+    private boolean extractAllFeatures = false;
+
+    public LTRScoringQueryBuilder() {
+    }
+
+    public LTRScoringQueryBuilder modelName(String modelName) {
+        this.modelName = modelName;
+        return this;
+    }
+
+    public LTRScoringQueryBuilder extractAllFeatures(boolean extractAllFeatures) {
+        this.extractAllFeatures = extractAllFeatures;
+        return this;
+    }
+
+    @Override
+    protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(LTRScoringQueryParser.NAME);
+        if (modelName == null) {
+            throw new ElasticsearchParseException("LearnToRank requires 
'model' to be provided");
+        }
+        builder.field("model", modelName);
+        if (extractAllFeatures) {
+            builder.field("extractAllFeatures", extractAllFeatures);
+        }
+        builder.endObject();
+    }
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQueryParser.java b/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQueryParser.java
new file mode 100644
index 0000000..2a79977
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/LTRScoringQueryParser.java
@@ -0,0 +1,59 @@
+package org.wikimedia.search.extra.ltr;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.wikimedia.search.extra.ltr.store.LTRStoreService;
+
+import java.io.IOException;
+
+public class LTRScoringQueryParser implements QueryParser {
+    public static final String NAME = "ltr";
+   
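+    // Models are resolved by name from the node-level store at parse time;
+    // parse() rejects model names the store does not contain.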
+    final private LTRStoreService store;
+
+    @Inject
+    public LTRScoringQueryParser(LTRStoreService store) {
+        this.store = store;
+    }
+
+    public String[] names() {
+        return new String[]{NAME, "learn_to_rank", "learnToRank"};
+    }
+
+    public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+        XContentParser parser = parseContext.parser();
+
+        XContentParser.Token token;
+        String currentFieldName = null;
+        String modelName = null;
+        boolean extractAllFeatures = false;
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            } else if (token.isValue()) {
+                if ("model".equals(currentFieldName)) {
+                    modelName = parser.text();
+                } else if ("extract_all_features".equals(currentFieldName) || 
"extractAllFeatures".equals(currentFieldName)) {
+                    extractAllFeatures = parser.booleanValue();
+                } else {
+                    throw new QueryParsingException(parseContext, "[ltr] query 
does not support [" + currentFieldName + "]");
+                }
+            } else if (token == XContentParser.Token.START_ARRAY) {
+                throw new QueryParsingException(parseContext, "[ltr] query 
does not support array values");
+            }
+        }
+
+        if (modelName == null) {
+            throw new QueryParsingException(parseContext, "learn_to_rank 
requires 'model' to be specified");
+        }
+        if (!store.getModelStore().containsModel(modelName)) {
+            throw new QueryParsingException(parseContext, "[ltr] unknown model 
[" + modelName + "]");
+        }
+
+        return new LTRScoringQuery(store.getModelStore().getModel(modelName), extractAllFeatures);
+    }
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/feature/Feature.java b/src/main/java/org/wikimedia/search/extra/ltr/feature/Feature.java
new file mode 100644
index 0000000..0d5cd5b
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/feature/Feature.java
@@ -0,0 +1,349 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.feature;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.wikimedia.search.extra.ltr.DocInfo;
+
+/**
+ * A recipe for computing a feature.  Subclass this for specialized feature calculations.
+ * <p>
+ * A feature consists of
+ * <ul>
+ * <li> a name as the identifier
+ * <li> parameters to represent the specific feature
+ * </ul>
+ * <p>
+ * Example configuration (snippet):
+ * <pre>{
+   "class" : "...",
+   "name" : "myFeature",
+   "params" : {
+       ...
+   }
+}</pre>
+ * <p>
+ * {@link Feature} is an abstract class and concrete classes should implement 
+ * the {@link #validate()} function, and must implement the {@link #paramsToMap()}
+ * and createWeight() methods.
+ */
+public abstract class Feature extends Query {
+
+  final protected String name;
+  private int index = -1;
+  private float defaultValue = 0.0f;
+
+  final private Map<String,Object> params;
+  private static final int CLASS_NAME_HASH = Feature.class.getName().hashCode();
+
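+  // Features are instantiated reflectively from the "class" name in the JSON
+  // configuration (see the example in the class javadoc), via a
+  // (String name, Map params) constructor that every concrete Feature must provide.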
+  public static Feature getInstance(String className, String name, Map<String,Object> params) {
+    Class<?> clazz;
+    try {
+      clazz = Feature.class.getClassLoader().loadClass(className);
+    } catch (ClassNotFoundException e) {
+      throw new FeatureException("Feature type does not exist " + className, 
e);
+    }
+    if (!Feature.class.isAssignableFrom(clazz)) {
+        throw new FeatureException("Feature type is not a Feature " + 
className);
+    }
+    // TODO: Assert instanceof Feature?
+    Feature f;
+    try {
+        f = (Feature)clazz.getConstructor(String.class, Map.class).newInstance(
+            name, params);
+    } catch (NoSuchMethodException e) {
+        throw new FeatureException("Feature type does not have valid 
constructor: " + className, e);
+    } catch (Exception e) {
+        throw new FeatureException("Feature type failed construction: " + 
className, e);
+    }
+    f.validate();
+    return f;
+  }
+
+  public Feature(String name, Map<String,Object> params) {
+    this.name = name;
+    this.params = params;
+  }
+  
+  /**
+   * As part of construction of a feature, this function confirms
+   * that the feature parameters are valid.
+   *
+   * @throws FeatureException
+   *             Feature Exception
+   */
+  protected void validate() throws FeatureException {
+    
+  }
+
+  @Override
+  public String toString(String field) {
+    final StringBuilder sb = new StringBuilder(64); // default initialCapacity of 16 won't be enough
+    sb.append(getClass().getSimpleName());
+    sb.append(" [name=").append(name);
+    final LinkedHashMap<String,Object> params = paramsToMap();
+    if (params != null) {
+      sb.append(", params=").append(params);
+    }
+    sb.append(']');
+    return sb.toString();
+  }
+
+  public abstract FeatureWeight createWeight(IndexSearcher searcher,
+      boolean needsScores, Map<String,String[]> efi) throws IOException;
+
+  public float getDefaultValue() {
+    return defaultValue;
+  }
+
+  public void setDefaultValue(String value){
+    defaultValue = Float.parseFloat(value);
+  }
+
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = CLASS_NAME_HASH;
+    result = (prime * result) + index;
+    result = (prime * result) + ((name == null) ? 0 : name.hashCode());
+    result = (prime * result) + ((params == null) ? 0 : params.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    return o != null && Feature.class.isAssignableFrom(o.getClass()) && equalsTo(getClass().cast(o));
+  }
+
+  private boolean equalsTo(Feature other) {
+    if (index != other.index) {
+        return false;
+    }
+    if (name == null) {
+        if (other.name != null) {
+            return false;
+        }
+    } else if (!name.equals(other.name)) {
+        return false;
+    }
+    if (params == null) {
+        if (other.params != null) {
+            return false;
+        }
+    } else if (!params.equals(other.params)) {
+        return false;
+    }
+    return true;
+  }
+
+  /**
+   * @return the name
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * @return the id
+   */
+  public int getIndex() {
+    return index;
+  }
+
+  /**
+   * @param index
+   *          Unique ID for this feature. Similar to feature name, except it can
+   *          be used to directly access the feature in the global list of
+   *          features.
+   */
+  public void setIndex(int index) {
+    this.index = index;
+  }
+
+  public abstract LinkedHashMap<String,Object> paramsToMap();
+  /**
+   * Weight for a feature
+   **/
+  public abstract class FeatureWeight extends Weight {
+
+    final protected IndexSearcher searcher;
+    final protected Map<String,String[]> efi;
+
+    /**
+     * Initialize a feature without the normalizer from the feature file. This is
+     * called on initial construction since multiple models share the same
+     * features, but have different normalizers. A concrete model's feature is
+     * copied through featForNewModel().
+     *
+     * @param q
+     *          query associated with this FeatureWeight
+     * @param searcher
+     *          searcher available for features if they need them
+     */
+    public FeatureWeight(Query q, IndexSearcher searcher, Map<String,String[]> efi) {
+      super(q);
+      this.searcher = searcher;
+      this.efi = efi;
+    }
+
+    public String getName() {
+      return Feature.this.getName();
+    }
+
+    public int getIndex() {
+      return Feature.this.getIndex();
+    }
+
+    public float getDefaultValue() {
+      return Feature.this.getDefaultValue();
+    }
+
+    @Override
+    public abstract FeatureScorer scorer(LeafReaderContext context)
+        throws IOException;
+
+    @Override
+    public Explanation explain(LeafReaderContext context, int doc)
+        throws IOException {
+      final FeatureScorer r = scorer(context);
+      float score = getDefaultValue();
+      if (r != null) {
+        r.iterator().advance(doc);
+        if (r.docID() == doc) score = r.score();
+        return Explanation.match(score, toString());
+      } else {
+        return Explanation.match(score, "The feature has no value");
+      }
+    }
+
+    @Override
+    public void normalize(float norm, float boost) {
+        // Intentionally ignored
+    }
+
+    @Override
+    public float getValueForNormalization() throws IOException {
+        return 1.0f;
+    }
+
+    /**
+     * Used in the FeatureWeight's explain. Each feature should implement this
+     * returning properties of the specific scorer useful for an explain. For
+     * example "MyCustomClassFeature [name=" + name + "myVariable:" + myVariable + "]";
+     * If not provided, a default implementation will return basic feature
+     * properties, which might not include query time specific values.
+     */
+    @Override
+    public String toString() {
+      return Feature.this.toString();
+    }
+
+    @Override
+    public void extractTerms(Set<Term> terms) {
+      // needs to be implemented by query subclasses
+      throw new UnsupportedOperationException();
+    }
+
+    /**
+     * A 'recipe' for computing a feature
+     */
+    public abstract class FeatureScorer extends Scorer {
+
+      final protected String name;
+      private DocInfo docInfo;
+      protected DocIdSetIterator itr;
+
+      public FeatureScorer(Feature.FeatureWeight weight,
+          DocIdSetIterator itr) {
+        super(weight);
+        this.itr = itr;
+        name = weight.getName();
+        docInfo = null;
+      }
+
+      @Override
+      public abstract float score() throws IOException;
+
+      /**
+       * Used to provide context from initial score steps to later reranking steps.
+       */
+      public void setDocInfo(DocInfo docInfo) {
+        this.docInfo = docInfo;
+      }
+
+      public DocInfo getDocInfo() {
+        return docInfo;
+      }
+
+      @Override
+      public int freq() throws IOException {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public int docID() {
+        return itr.docID();
+      }
+
+      @Override
+      public DocIdSetIterator iterator() {
+        return itr;
+      }
+    }
+
+    /**
+     * Default FeatureScorer class that returns the score passed in. Can be used
+     * as a simple ValueFeature, or to return a default scorer in case an
+     * underlying feature's scorer is null.
+     */
+    public class ValueFeatureScorer extends FeatureScorer {
+      float constScore;
+
+      public ValueFeatureScorer(FeatureWeight weight, float constScore,
+          DocIdSetIterator itr) {
+        super(weight,itr);
+        this.constScore = constScore;
+      }
+
+      @Override
+      public float score() {
+        return constScore;
+      }
+
+    }
+
+  }
+
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/feature/FeatureException.java b/src/main/java/org/wikimedia/search/extra/ltr/feature/FeatureException.java
new file mode 100644
index 0000000..c5a9d73
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/feature/FeatureException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.feature;
+
+public class FeatureException extends RuntimeException {
+
+  private static final long serialVersionUID = 1L;
+
+  public FeatureException(String message) {
+    super(message);
+  }
+
+  public FeatureException(String message, Exception cause) {
+    super(message, cause);
+  }
+
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/feature/FieldLengthFeature.java b/src/main/java/org/wikimedia/search/extra/ltr/feature/FieldLengthFeature.java
new file mode 100644
index 0000000..2ce8b8e
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/feature/FieldLengthFeature.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.feature;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.SmallFloat;
+/**
+ * This feature returns the length of a field (in terms) for the current document.
+ * Example configuration:
+ * <pre>{
+  "name":  "titleLength",
+  "class": "org.wikimedia.search.extra.ltr.feature.FieldLengthFeature",
+  "params": {
+      "field": "title"
+  }
+}</pre>
+ * Note: since this feature relies on norms values that are stored in a single byte,
+ * the value of the feature could be slightly different from the actual length.
+ * (see also {@link org.apache.lucene.search.similarities.ClassicSimilarity})
+ **/
+public class FieldLengthFeature extends Feature {
+
+  private String field;
+
+  public String getField() {
+    return field;
+  }
+
+  public void setField(String field) {
+    this.field = field;
+  }
+
+  @Override
+  public LinkedHashMap<String,Object> paramsToMap() {
+    final LinkedHashMap<String,Object> params = new LinkedHashMap<>(1, 1.0f);
+    params.put("field", field);
+    return params;
+  }
+
+  /** Cache of decoded bytes. */
+
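+  // ClassicSimilarity encodes lengthNorm = 1/sqrt(numTerms) into a single
+  // byte; inverting via 1/(norm * norm) recovers an approximate term count.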
+  private static final float[] NORM_TABLE = new float[256];
+
+  static {
+    NORM_TABLE[0] = 0;
+    for (int i = 1; i < 256; i++) {
+      float norm = SmallFloat.byte315ToFloat((byte) i);
+      NORM_TABLE[i] = 1.0f / (norm * norm);
+    }
+  }
+
+  /**
+   * Decodes the norm value, assuming it is a single byte.
+   *
+   */
+
+  private final float decodeNorm(long norm) {
+    return NORM_TABLE[(int) (norm & 0xFF)]; // & 0xFF maps negative bytes to
+    // positive above 127
+  }
+
+  public FieldLengthFeature(String name, Map<String,Object> params) {
+    super(name, params);
+  }
+
+  @Override
+  public FeatureWeight createWeight(IndexSearcher searcher, boolean needsScores,
+      Map<String,String[]> efi) throws IOException {
+
+    return new FieldLengthFeatureWeight(searcher, efi);
+  }
+
+
+  public class FieldLengthFeatureWeight extends FeatureWeight {
+
+    public FieldLengthFeatureWeight(IndexSearcher searcher, Map<String,String[]> efi) {
+      super(FieldLengthFeature.this, searcher, efi);
+    }
+
+    @Override
+    public FeatureScorer scorer(LeafReaderContext context) throws IOException {
+      NumericDocValues norms = context.reader().getNormValues(field);
+      if (norms == null){
+        return new ValueFeatureScorer(this, 0f, 
+            DocIdSetIterator.all(DocIdSetIterator.NO_MORE_DOCS));
+      }
+      return new FieldLengthFeatureScorer(this,
+          DocIdSetIterator.all(context.reader().maxDoc()), norms);
+    }
+
+    public class FieldLengthFeatureScorer extends FeatureScorer {
+
+      NumericDocValues norms = null;
+
+      public FieldLengthFeatureScorer(FeatureWeight weight,
+          DocIdSetIterator itr, NumericDocValues norms) throws IOException {
+        super(weight, itr);
+        this.norms = norms;
+
+        // In the constructor, docId is -1, so using 0 as default lookup
+        final IndexableField idxF = searcher.doc(0).getField(field);
+        if (idxF.fieldType().omitNorms()) {
+          throw new IOException(
+              "FieldLengthFeatures can't be used if omitNorms is enabled 
(field="
+                  + field + ")");
+        }
+      }
+
+      @Override
+      public float score() throws IOException {
+        final long l = norms.get(docID());
+        final float numTerms = decodeNorm(l);
+        return numTerms;
+      }
+    }
+  }
+
+}
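
A note on the NORM_TABLE above: the encode side lives in ClassicSimilarity,
which is why the feature value is only approximate. A minimal sketch of the
round trip, not part of the patch (NormRoundTrip is a made-up name), using the
same Lucene SmallFloat API the table already relies on:

    import org.apache.lucene.util.SmallFloat;

    public class NormRoundTrip {
      public static void main(String[] args) {
        for (int numTerms : new int[] {1, 10, 100, 1000}) {
          // Encode as ClassicSimilarity does: lengthNorm = 1/sqrt(numTerms),
          // squeezed into a single byte (this is where precision is lost).
          byte encoded = SmallFloat.floatToByte315((float) (1.0 / Math.sqrt(numTerms)));
          // Decode as FieldLengthFeature does: 1/(norm*norm) ~= numTerms.
          float norm = SmallFloat.byte315ToFloat(encoded);
          System.out.println(numTerms + " terms decodes to ~" + (1.0f / (norm * norm)));
        }
      }
    }
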
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/feature/FieldValueFeature.java b/src/main/java/org/wikimedia/search/extra/ltr/feature/FieldValueFeature.java
new file mode 100644
index 0000000..84eecf4
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/feature/FieldValueFeature.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.feature;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+
+import com.google.common.collect.Sets;
+/**
+ * This feature returns the value of a field in the current document.
+ * Example configuration:
+ * <pre>{
+  "name":  "rawHits",
+  "class": "org.wikimedia.search.extra.ltr.feature.FieldValueFeature",
+  "params": {
+      "field": "hits"
+  }
+}</pre>
+ */
+public class FieldValueFeature extends Feature {
+
+  private String field;
+  private Set<String> fieldAsSet;
+
+  public String getField() {
+    return field;
+  }
+
+  public void setField(String field) {
+    this.field = field;
+    fieldAsSet = Sets.newHashSet(field);
+  }
+
+  @Override
+  public LinkedHashMap<String,Object> paramsToMap() {
+    final LinkedHashMap<String,Object> params = new LinkedHashMap<>(1, 1.0f);
+    params.put("field", field);
+    return params;
+  }
+
+  public FieldValueFeature(String name, Map<String,Object> params) {
+    super(name, params);
+  }
+
+  @Override
+  public FeatureWeight createWeight(IndexSearcher searcher, boolean needsScores,
+      Map<String,String[]> efi) throws IOException {
+    return new FieldValueFeatureWeight(searcher, efi);
+  }
+
+  public class FieldValueFeatureWeight extends FeatureWeight {
+
+    public FieldValueFeatureWeight(IndexSearcher searcher, 
+        Map<String,String[]> efi) {
+      super(FieldValueFeature.this, searcher, efi);
+    }
+
+    @Override
+    public FeatureScorer scorer(LeafReaderContext context) throws IOException {
+      return new FieldValueFeatureScorer(this, context,
+          DocIdSetIterator.all(DocIdSetIterator.NO_MORE_DOCS));
+    }
+
+    public class FieldValueFeatureScorer extends FeatureScorer {
+
+      LeafReaderContext context = null;
+
+      public FieldValueFeatureScorer(FeatureWeight weight,
+          LeafReaderContext context, DocIdSetIterator itr) {
+        super(weight, itr);
+        this.context = context;
+      }
+
+      @Override
+      public float score() throws IOException {
+
+        try {
+          final Document document = context.reader().document(itr.docID(),
+              fieldAsSet);
+          final IndexableField indexableField = document.getField(field);
+          if (indexableField == null) {
+            return getDefaultValue();
+          }
+          final Number number = indexableField.numericValue();
+          if (number != null) {
+            return number.floatValue();
+          } else {
+            final String string = indexableField.stringValue();
+            // boolean values in the index are encoded with the
+            // chars T/F
+            if (string.equals("T")) {
+              return 1;
+            }
+            if (string.equals("F")) {
+              return 0;
+            }
+          }
+        } catch (final IOException e) {
+          throw new FeatureException(
+              e.toString() + ": " +
+              "Unable to extract feature for "
+              + name, e);
+        }
+        return getDefaultValue();
+      }
+    }
+  }
+}
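
The extraction rules in score() above (numeric stored value first, then the
T/F string encoding, else the default) can be exercised with stock Lucene
document classes. A hypothetical sketch (ValueExtraction is a made-up name):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.StoredField;
    import org.apache.lucene.index.IndexableField;

    public class ValueExtraction {
      public static void main(String[] args) {
        Document doc = new Document();
        doc.add(new StoredField("hits", 42));    // numeric stored field
        doc.add(new StoredField("isBook", "T")); // boolean encoded as T/F

        IndexableField hits = doc.getField("hits");
        System.out.println(hits.numericValue().floatValue()); // 42.0

        IndexableField isBook = doc.getField("isBook");
        // numericValue() is null for string fields, so fall back to T/F
        System.out.println("T".equals(isBook.stringValue()) ? 1f : 0f); // 1.0
      }
    }
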
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/feature/ValueFeature.java b/src/main/java/org/wikimedia/search/extra/ltr/feature/ValueFeature.java
new file mode 100644
index 0000000..21580ea
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/feature/ValueFeature.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.feature;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+/**
+ * This feature returns a constant, configured value for the current document.
+ *
+ * Example configuration:
+ * <pre>{
+   "name" : "userFromMobile",
+   "class" : "org.wikimedia.search.extra.ltr.feature.ValueFeature",
+   "params" : { "value" : "${userFromMobile}", "required":true }
+ }</pre>
+ *
+ * You can place a constant value like "1.3f" in the "value" param, but often you
+ * will want to pass in external information to use per request. For instance,
+ * maybe you want to rank things differently if the search came from a mobile
+ * device, or maybe you want to use your external query intent system as a
+ * feature. In the rerank request you can pass in rq={... efi.userFromMobile=1},
+ * and the above feature will return 1 for all the docs for that request. If
+ * required is set to true, the request will return an error when the efi is not
+ * passed in; otherwise it will just skip the feature and use a default value of
+ * 0 instead.
+ **/
+public class ValueFeature extends Feature {
+  private float configValue = -1f;
+  private String configValueStr = null;
+
+  private Object value = null;
+  private Boolean required = null;
+
+  public Object getValue() {
+    return value;
+  }
+
+  public void setValue(Object value) {
+    this.value = value;
+    if (value instanceof String) {
+      configValueStr = (String) value;
+      if (configValueStr.trim().isEmpty()) {
+        throw new FeatureException("Empty field 'value' in params for " + 
this);
+      }
+    } else if (value instanceof Double) {
+      configValue = ((Double) value).floatValue();
+    } else if (value instanceof Float) {
+      configValue = ((Float) value).floatValue();
+    } else if (value instanceof Integer) {
+      configValue = ((Integer) value).floatValue();
+    } else if (value instanceof Long) {
+      configValue = ((Long) value).floatValue();
+    } else {
+      throw new FeatureException("Invalid type for 'value' in params for " + 
this);
+    }
+  }
+
+  public boolean isRequired() {
+    return Boolean.TRUE.equals(required);
+  }
+
+  public void setRequired(boolean required) {
+    this.required = required;
+  }
+
+  @Override
+  public LinkedHashMap<String,Object> paramsToMap() {
+    final LinkedHashMap<String,Object> params = new LinkedHashMap<>(2, 1.0f);
+    params.put("value", value);
+    if (required != null) {
+      params.put("required", required);
+    }
+    return params;
+  }
+
+  public ValueFeature(String name, Map<String,Object> params) {
+    super(name, params);
+  }
+
+  @Override
+  public FeatureWeight createWeight(IndexSearcher searcher, boolean 
needsScores,
+      Map<String,String[]> efi)
+      throws IOException {
+    return new ValueFeatureWeight(searcher, efi);
+  }
+
+  public class ValueFeatureWeight extends FeatureWeight {
+
+    protected final Float featureValue;
+
+    public ValueFeatureWeight(IndexSearcher searcher,
+        Map<String,String[]> efi) {
+      super(ValueFeature.this, searcher, efi);
+      if (configValueStr != null) {
+        // TODO Figure out what the MacroExpander equivalent would be to handle configValueStr
+        if (isRequired()) {
+          throw new FeatureException(this.getClass().getSimpleName()
+              + " requires efi parameter that was not passed in request.");
+        } else {
+          featureValue = null;
+        }
+      } else {
+        featureValue = configValue;
+      }
+    }
+
+    @Override
+    public FeatureScorer scorer(LeafReaderContext context) throws IOException {
+      if (featureValue != null) {
+        return new ValueFeatureScorer(this, featureValue,
+            DocIdSetIterator.all(DocIdSetIterator.NO_MORE_DOCS));
+      } else {
+        return null;
+      }
+    }
+  }
+
+}
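
The required/optional efi behavior described in the javadoc, condensed into a
standalone sketch; EfiResolution and its resolve helper are made up for
illustration and sit outside the Lucene machinery:

    public class EfiResolution {
      // Mirrors ValueFeatureWeight's constructor logic: a value that is still
      // a template string means the efi was never substituted in.
      static Float resolve(String configValueStr, float configValue, boolean required) {
        if (configValueStr != null) {
          if (required) {
            throw new IllegalArgumentException("required efi parameter missing");
          }
          return null; // scorer() returns null, feature defaults to 0
        }
        return configValue;
      }

      public static void main(String[] args) {
        System.out.println(resolve(null, 1.3f, false));               // 1.3
        System.out.println(resolve("${userFromMobile}", -1f, false)); // null
        System.out.println(resolve("${userFromMobile}", -1f, true));  // throws
      }
    }
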
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/feature/package-info.java b/src/main/java/org/wikimedia/search/extra/ltr/feature/package-info.java
new file mode 100644
index 0000000..0c218b9
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/feature/package-info.java
@@ -0,0 +1 @@
+package org.wikimedia.search.extra.ltr.feature;
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/model/LTRScoringModel.java b/src/main/java/org/wikimedia/search/extra/ltr/model/LTRScoringModel.java
new file mode 100644
index 0000000..be23fc6
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/model/LTRScoringModel.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.model;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Explanation;
+import org.wikimedia.search.extra.ltr.feature.Feature;
+import org.wikimedia.search.extra.ltr.feature.FeatureException;
+import org.wikimedia.search.extra.ltr.norm.IdentityNormalizer;
+import org.wikimedia.search.extra.ltr.norm.Normalizer;
+
+/**
+ * A scoring model computes scores that can be used to rerank documents.
+ * <p>
+ * A scoring model consists of
+ * <ul>
+ * <li> a list of features ({@link Feature}) and
+ * <li> a list of normalizers ({@link Normalizer}) plus
+ * <li> parameters or configuration to represent the scoring algorithm.
+ * </ul>
+ * <p>
+ * Example configuration (snippet):
+ * <pre>{
+   "class" : "...",
+   "name" : "myModelName",
+   "features" : [
+       {
+         "name" : "isBook"
+       },
+       {
+         "name" : "originalScore",
+         "norm": {
+             "class" : 
"org.wikimedia.search.extra.ltr.norm.StandardNormalizer",
+             "params" : { "avg":"100", "std":"10" }
+         }
+       },
+       {
+         "name" : "price",
+         "norm": {
+             "class" : "org.wikimedia.search.extra.ltr.norm.MinMaxNormalizer",
+             "params" : { "min":"0", "max":"1000" }
+         }
+       }
+   ],
+   "params" : {
+       ...
+   }
+}</pre>
+ * <p>
+ * {@link LTRScoringModel} is an abstract class and concrete classes must
+ * implement the {@link #score(float[])} and
+ * {@link #explain(LeafReaderContext, int, float, List)} methods.
+ */
+public abstract class LTRScoringModel {
+
+  protected final String name;
+  private final String featureStoreName;
+  protected final List<Feature> features;
+  private final List<Feature> allFeatures;
+  private final Map<String,Object> params;
+  private final List<Normalizer> norms;
+
+  public static LTRScoringModel getInstance(String className, String name,
+      List<Feature> features, List<Normalizer> norms,
+      String featureStoreName, List<Feature> allFeatures,
+      Map<String,Object> params) throws ModelException {
+    Class<?> clazz;
+    try {
+      clazz = LTRScoringModel.class.getClassLoader().loadClass(className);
+    } catch (ClassNotFoundException e) {
+      throw new ModelException("Model type does not exist " + className, e);
+    }
+    if (!LTRScoringModel.class.isAssignableFrom(clazz)) {
+       throw new ModelException("Model type is not an LTRScoringModel: " + 
className);
+    }
+    // create an instance of the model
+    LTRScoringModel model;
+    try {
+        model = (LTRScoringModel)clazz.getConstructor(
+              String.class, List.class, List.class, String.class, List.class, Map.class
+           ).newInstance(
+              name, features, norms, featureStoreName, allFeatures, params
+           );
+    } catch (NoSuchMethodException e) {
+        throw new ModelException("Model type does not have valid constructor: 
" + className, e);
+    } catch (Exception e) {
+        throw new ModelException("Model type failed construction: " + 
className, e);
+    }
+    model.validate();
+    return model;
+  }
+
+  public LTRScoringModel(String name, List<Feature> features,
+      List<Normalizer> norms,
+      String featureStoreName, List<Feature> allFeatures,
+      Map<String,Object> params) {
+    this.name = name;
+    this.features = features;
+    this.featureStoreName = featureStoreName;
+    this.allFeatures = allFeatures;
+    this.params = params;
+    this.norms = norms;
+  }
+
+  /**
+   * Validate that settings make sense and throws
+   * {@link ModelException} if they do not make sense.
+   */
+  public void validate() throws ModelException {
+    if (features.isEmpty()) {
+      throw new ModelException("no features declared for model "+name);
+    }
+    final HashSet<String> featureNames = new HashSet<>();
+    for (final Feature feature : features) {
+      final String featureName = feature.getName();
+      if (!featureNames.add(featureName)) {
+        throw new ModelException("duplicated feature "+featureName+" in model 
"+name);
+      }
+    }
+    if (features.size() != norms.size()) {
+      throw new ModelException("counted "+features.size()+" features and 
"+norms.size()+" norms in model "+name);
+    }
+  }
+
+  /**
+   * @return the norms
+   */
+  public List<Normalizer> getNorms() {
+    return Collections.unmodifiableList(norms);
+  }
+
+  /**
+   * @return the name
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * @return the features
+   */
+  public List<Feature> getFeatures() {
+    return Collections.unmodifiableList(features);
+  }
+
+  public Map<String,Object> getParams() {
+    return params;
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = (prime * result) + ((features == null) ? 0 : features.hashCode());
+    result = (prime * result) + ((name == null) ? 0 : name.hashCode());
+    result = (prime * result) + ((params == null) ? 0 : params.hashCode());
+    result = (prime * result) + ((norms == null) ? 0 : norms.hashCode());
+    result = (prime * result) + ((featureStoreName == null) ? 0 : featureStoreName.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    final LTRScoringModel other = (LTRScoringModel) obj;
+    if (features == null) {
+      if (other.features != null) {
+        return false;
+      }
+    } else if (!features.equals(other.features)) {
+      return false;
+    }
+    if (norms == null) {
+      if (other.norms != null) {
+        return false;
+      }
+    } else if (!norms.equals(other.norms)) {
+      return false;
+    }
+    if (name == null) {
+      if (other.name != null) {
+        return false;
+      }
+    } else if (!name.equals(other.name)) {
+      return false;
+    }
+    if (params == null) {
+      if (other.params != null) {
+        return false;
+      }
+    } else if (!params.equals(other.params)) {
+      return false;
+    }
+    if (featureStoreName == null) {
+      if (other.featureStoreName != null) {
+        return false;
+      }
+    } else if (!featureStoreName.equals(other.featureStoreName)) {
+      return false;
+    }
+
+    return true;
+  }
+
+  public boolean hasParams() {
+    return !((params == null) || params.isEmpty());
+  }
+
+  public Collection<Feature> getAllFeatures() {
+    return allFeatures;
+  }
+
+  public String getFeatureStoreName() {
+    return featureStoreName;
+  }
+
+  /**
+   * Given a list of normalized values for all features a scoring algorithm
+   * cares about, calculate and return a score.
+   *
+   * @param modelFeatureValuesNormalized
+   *          List of normalized feature values. Each feature is identified by
+   *          its id, which is the index in the array
+   * @return The final score for a document
+   */
+  public abstract float score(float[] modelFeatureValuesNormalized);
+
+  /**
+   * Similar to the score() function, except it returns an explanation of how
+   * the features were used to calculate the score.
+   *
+   * @param context
+   *          Context the document is in
+   * @param doc
+   *          Document to explain
+   * @param finalScore
+   *          Original score
+   * @param featureExplanations
+   *          Explanations for each feature calculation
+   * @return Explanation for the scoring of a document
+   */
+  public abstract Explanation explain(LeafReaderContext context, int doc,
+      float finalScore, List<Explanation> featureExplanations);
+
+  @Override
+  public String toString() {
+    return  getClass().getSimpleName() + "(name="+getName()+")";
+  }
+
+  /**
+   * Goes through all the stored feature values, and calculates the normalized
+   * values for all the features that will be used for scoring.
+   */
+  public void normalizeFeaturesInPlace(float[] modelFeatureValues) {
+    float[] modelFeatureValuesNormalized = modelFeatureValues;
+    if (modelFeatureValues.length != norms.size()) {
+      throw new FeatureException("Must have normalizer for every feature");
+    }
+    for(int idx = 0; idx < modelFeatureValuesNormalized.length; ++idx) {
+      modelFeatureValuesNormalized[idx] =
+          norms.get(idx).normalize(modelFeatureValuesNormalized[idx]);
+    }
+  }
+
+  public Explanation getNormalizerExplanation(Explanation e, int idx) {
+    Normalizer n = norms.get(idx);
+    if (n != IdentityNormalizer.INSTANCE) {
+      return n.explain(e);
+    }
+    return e;
+  }
+
+}
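
How normalizeFeaturesInPlace() composes with the norm classes later in this
patch, assuming the patch compiles as posted (NormalizePipeline is a made-up
name):

    import java.util.Arrays;
    import java.util.List;

    import org.wikimedia.search.extra.ltr.norm.IdentityNormalizer;
    import org.wikimedia.search.extra.ltr.norm.MinMaxNormalizer;
    import org.wikimedia.search.extra.ltr.norm.Normalizer;

    public class NormalizePipeline {
      public static void main(String[] args) {
        MinMaxNormalizer price = new MinMaxNormalizer();
        price.setMin(0f);
        price.setMax(1000f);
        List<Normalizer> norms = Arrays.asList(
            (Normalizer) IdentityNormalizer.INSTANCE, price);

        // One value per feature, indexed like the model's feature list.
        float[] values = {0.8f, 250f}; // originalScore, price
        for (int i = 0; i < values.length; i++) {
          values[i] = norms.get(i).normalize(values[i]);
        }
        System.out.println(Arrays.toString(values)); // [0.8, 0.25]
      }
    }
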
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/model/LinearModel.java b/src/main/java/org/wikimedia/search/extra/ltr/model/LinearModel.java
new file mode 100644
index 0000000..622a0be
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/model/LinearModel.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.model;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Explanation;
+import org.wikimedia.search.extra.ltr.feature.Feature;
+import org.wikimedia.search.extra.ltr.norm.Normalizer;
+
+/**
+ * A scoring model that computes scores using a dot product. 
+ * Example models are RankSVM and Pranking.
+ * <p>
+ * Example configuration:
+ * <pre>{
+   "class" : "org.wikimedia.search.extra.ltr.model.LinearModel",
+   "name" : "myModelName",
+   "features" : [
+       { "name" : "userTextTitleMatch" },
+       { "name" : "originalScore" },
+       { "name" : "isBook" }
+   ],
+   "params" : {
+       "weights" : {
+           "userTextTitleMatch" : 1.0,
+           "originalScore" : 0.5,
+           "isBook" : 0.1
+       }
+   }
+}</pre>
+ * <p>
+ * Background reading:
+ * <ul>
+ * <li> <a href="http://www.cs.cornell.edu/people/tj/publications/joachims_02c.pdf">
+ * Thorsten Joachims. Optimizing Search Engines Using Clickthrough Data.
+ * Proceedings of the ACM Conference on Knowledge Discovery and Data Mining (KDD), ACM, 2002.</a>
+ * <li> <a href="https://papers.nips.cc/paper/2023-pranking-with-ranking.pdf">
+ * Koby Crammer and Yoram Singer. Pranking with Ranking.
+ * Advances in Neural Information Processing Systems (NIPS), 2001.</a>
+ * </ul>
+ */
+public class LinearModel extends LTRScoringModel {
+
+  protected Float[] featureToWeight;
+
+  @SuppressWarnings("unchecked")
+  public void setWeights(Object weights) {
+    final Map<String,Double> modelWeights = (Map<String,Double>) weights;
+    for (int ii = 0; ii < features.size(); ++ii) {
+      final String key = features.get(ii).getName();
+      final Double val = modelWeights.get(key);
+      featureToWeight[ii] = (val == null ? null : Float.valueOf(val.floatValue()));
+    }
+  }
+
+  public LinearModel(String name, List<Feature> features,
+      List<Normalizer> norms,
+      String featureStoreName, List<Feature> allFeatures,
+      Map<String,Object> params) {
+    super(name, features, norms, featureStoreName, allFeatures, params);
+    featureToWeight = new Float[features.size()];
+  }
+
+  @Override
+  public void validate() throws ModelException {
+    super.validate();
+
+    final ArrayList<String> missingWeightFeatureNames = new ArrayList<String>();
+    for (int i = 0; i < features.size(); ++i) {
+      if (featureToWeight[i] == null) {
+        missingWeightFeatureNames.add(features.get(i).getName());
+      }
+    }
+    if (missingWeightFeatureNames.size() == features.size()) {
+      throw new ModelException("Model " + name + " doesn't contain any 
weights");
+    }
+    if (!missingWeightFeatureNames.isEmpty()) {
+      throw new ModelException("Model " + name + " lacks weight(s) for 
"+missingWeightFeatureNames);
+    }
+  }
+
+  @Override
+  public float score(float[] modelFeatureValuesNormalized) {
+    float score = 0;
+    for (int i = 0; i < modelFeatureValuesNormalized.length; ++i) {
+      score += modelFeatureValuesNormalized[i] * featureToWeight[i];
+    }
+    return score;
+  }
+
+  @Override
+  public Explanation explain(LeafReaderContext context, int doc,
+      float finalScore, List<Explanation> featureExplanations) {
+    final List<Explanation> details = new ArrayList<>();
+    int index = 0;
+
+    for (final Explanation featureExplain : featureExplanations) {
+      final List<Explanation> featureDetails = new ArrayList<>();
+      featureDetails.add(Explanation.match(featureToWeight[index],
+          "weight on feature"));
+      featureDetails.add(featureExplain);
+
+      details.add(Explanation.match(featureExplain.getValue()
+          * featureToWeight[index], "prod of:", featureDetails));
+      index++;
+    }
+
+    return Explanation.match(finalScore, toString()
+        + " model applied to features, sum of:", details);
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder(getClass().getSimpleName());
+    sb.append("(name=").append(getName());
+    sb.append(",featureWeights=[");
+    for (int ii = 0; ii < features.size(); ++ii) {
+      if (ii>0) sb.append(',');
+      final String key = features.get(ii).getName();
+      sb.append(key).append('=').append(featureToWeight[ii]);
+    }
+    sb.append("])");
+    return sb.toString();
+  }
+
+}
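
score() above is a plain dot product, so the javadoc's example weights can be
checked by hand (DotProductScore is a made-up name):

    public class DotProductScore {
      public static void main(String[] args) {
        float[] weights = {1.0f, 0.5f, 0.1f};    // userTextTitleMatch, originalScore, isBook
        float[] normalized = {1.0f, 0.8f, 0.0f}; // normalized feature values
        float score = 0f;
        for (int i = 0; i < normalized.length; i++) {
          score += normalized[i] * weights[i];
        }
        System.out.println(score); // 1.4
      }
    }
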
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/model/ModelException.java b/src/main/java/org/wikimedia/search/extra/ltr/model/ModelException.java
new file mode 100644
index 0000000..defec42
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/model/ModelException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.model;
+
+public class ModelException extends RuntimeException {
+
+  private static final long serialVersionUID = 1L;
+
+  public ModelException(String message) {
+    super(message);
+  }
+
+  public ModelException(String message, Exception cause) {
+    super(message, cause);
+  }
+
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/model/package-info.java b/src/main/java/org/wikimedia/search/extra/ltr/model/package-info.java
new file mode 100644
index 0000000..770de36
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/model/package-info.java
@@ -0,0 +1 @@
+package org.wikimedia.search.extra.ltr.model;
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/norm/IdentityNormalizer.java b/src/main/java/org/wikimedia/search/extra/ltr/norm/IdentityNormalizer.java
new file mode 100644
index 0000000..a968433
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/norm/IdentityNormalizer.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.norm;
+
+import java.util.LinkedHashMap;
+
+/**
+ * A Normalizer that normalizes a feature value to itself. This is the
+ * default normalizer class; if no normalizer is configured, then the
+ * IdentityNormalizer will be used.
+ */
+public class IdentityNormalizer extends Normalizer {
+
+  public static final IdentityNormalizer INSTANCE = new IdentityNormalizer();
+
+  public IdentityNormalizer() {
+
+  }
+
+  @Override
+  public float normalize(float value) {
+    return value;
+  }
+
+  @Override
+  public LinkedHashMap<String,Object> paramsToMap() {
+    return null;
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName();
+  }
+
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/norm/MinMaxNormalizer.java b/src/main/java/org/wikimedia/search/extra/ltr/norm/MinMaxNormalizer.java
new file mode 100644
index 0000000..afdae20
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/norm/MinMaxNormalizer.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.norm;
+
+import java.util.LinkedHashMap;
+
+/**
+ * A Normalizer to scale a feature value using a (min,max) range.
+ * <p>
+ * Example configuration:
+<pre>
+"norm" : {
+    "class" : "org.wikimedia.search.extra.ltr.norm.MinMaxNormalizer",
+    "params" : { "min":"0", "max":"50" }
+}
+</pre>
+ * Example normalizations:
+ * <ul>
+ * <li>-5 will be normalized to -0.1
+ * <li>55 will be normalized to  1.1
+ * <li>+5 will be normalized to +0.1
+ * </ul>
+ */
+public class MinMaxNormalizer extends Normalizer {
+
+  private float min = Float.NEGATIVE_INFINITY;
+  private float max = Float.POSITIVE_INFINITY;
+  private float delta = max - min;
+
+  private void updateDelta() {
+    delta = max - min;
+  }
+
+  public float getMin() {
+    return min;
+  }
+
+  public void setMin(float min) {
+    this.min = min;
+    updateDelta();
+  }
+
+  public void setMin(String min) {
+    this.min = Float.parseFloat(min);
+    updateDelta();
+  }
+
+  public float getMax() {
+    return max;
+  }
+
+  public void setMax(float max) {
+    this.max = max;
+    updateDelta();
+  }
+
+  public void setMax(String max) {
+    this.max = Float.parseFloat(max);
+    updateDelta();
+  }
+  
+  @Override
+  protected void validate() throws NormalizerException {
+    super.validate();
+    if (delta == 0f) {
+      throw new NormalizerException("MinMax Normalizer delta must not be zero "
+          + "| min = " + min + ", max = " + max + ", delta = " + delta);
+    }
+  }
+
+  @Override
+  public float normalize(float value) {
+    return (value - min) / delta;
+  }
+
+  @Override
+  public LinkedHashMap<String,Object> paramsToMap() {
+    final LinkedHashMap<String,Object> params = new LinkedHashMap<>(2, 1.0f);
+    params.put("min", min);
+    params.put("max", max);
+    return params;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder(64); // default initialCapacity of 16 won't be enough
+    sb.append(getClass().getSimpleName()).append('(');
+    sb.append("min=").append(min);
+    sb.append(",max=").append(max).append(')');
+    return sb.toString();
+  }
+
+}
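
A quick check of the javadoc normalizations above (min=0, max=50), assuming
the patch compiles as posted (MinMaxCheck is a made-up name):

    import org.wikimedia.search.extra.ltr.norm.MinMaxNormalizer;

    public class MinMaxCheck {
      public static void main(String[] args) {
        MinMaxNormalizer n = new MinMaxNormalizer();
        n.setMin(0f);
        n.setMax(50f);
        System.out.println(n.normalize(-5f)); // -0.1
        System.out.println(n.normalize(55f)); //  1.1
        System.out.println(n.normalize(5f));  //  0.1
      }
    }
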
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/norm/Normalizer.java b/src/main/java/org/wikimedia/search/extra/ltr/norm/Normalizer.java
new file mode 100644
index 0000000..6e47a0c
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/norm/Normalizer.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.norm;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.lucene.search.Explanation;
+
+/**
+ * A normalizer normalizes the value of a feature. After the feature values
+ * have been computed, the {@link Normalizer#normalize(float)} methods will
+ * be called and the resulting values will be used by the model.
+ */
+public abstract class Normalizer {
+
+
+  public abstract float normalize(float value);
+
+  public abstract LinkedHashMap<String,Object> paramsToMap();
+
+  public Explanation explain(Explanation explain) {
+    final float normalized = normalize(explain.getValue());
+    final String explainDesc = "normalized using " + toString();
+
+    return Explanation.match(normalized, explainDesc, explain);
+  }
+
+  public static Normalizer getInstance(String className, Map<String,Object> params) {
+    Class<?> clazz;
+    try {
+      clazz = Normalizer.class.getClassLoader().loadClass(className);
+    } catch (ClassNotFoundException e) {
+      throw new NormalizerException("Normalizer type does not exist: " + 
className, e);
+    }
+    if (!Normalizer.class.isAssignableFrom(clazz)) {
+      throw new NormalizerException("Normalizer type is not a Normalizer: " + 
className);
+    }
+    Normalizer f;
+    try {
+      f = (Normalizer) clazz.getConstructor().newInstance();
+    } catch (NoSuchMethodException e) {
+      throw new NormalizerException("Normalizer type does not have valid 
constructor: " + className, e);
+    } catch (Exception e) {
+      throw new NormalizerException("Normalizer type failed construction: " + 
className, e);
+    }
+    f.validate();
+    return f;
+  }
+
+  /**
+   * On construction of a normalizer, this method confirms
+   * that the normalizer parameters are valid.
+   *
+   * @throws NormalizerException
+   *             if the normalizer parameters are invalid
+   */
+  protected void validate() throws NormalizerException {
+
+  }
+
+}
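
Usage sketch for the reflective factory above (LoadNormalizer is a made-up
name). Note that, as posted, the params map is accepted but never applied to
the new instance (the Solr original used its plugin utilities to invoke the
setters), so only no-arg defaults survive until the parsing layer wires that
up:

    import java.util.Collections;

    import org.wikimedia.search.extra.ltr.norm.Normalizer;

    public class LoadNormalizer {
      public static void main(String[] args) {
        Normalizer n = Normalizer.getInstance(
            "org.wikimedia.search.extra.ltr.norm.IdentityNormalizer",
            Collections.<String,Object>emptyMap());
        System.out.println(n.normalize(3.5f)); // 3.5, identity
      }
    }
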
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/norm/NormalizerException.java b/src/main/java/org/wikimedia/search/extra/ltr/norm/NormalizerException.java
new file mode 100644
index 0000000..531af9e
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/norm/NormalizerException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.norm;
+
+public class NormalizerException extends RuntimeException {
+
+  private static final long serialVersionUID = 1L;
+
+  public NormalizerException(String message) {
+    super(message);
+  }
+
+  public NormalizerException(String message, Exception cause) {
+    super(message, cause);
+  }
+
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/norm/StandardNormalizer.java b/src/main/java/org/wikimedia/search/extra/ltr/norm/StandardNormalizer.java
new file mode 100644
index 0000000..87a35f8
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/norm/StandardNormalizer.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.norm;
+
+import java.util.LinkedHashMap;
+
+/**
+ * A Normalizer to scale a feature value around an
+ * average-and-standard-deviation distribution.
+ * <p>
+ * Example configuration:
+<pre>
+"norm" : {
+    "class" : "org.wikimedia.search.extra.ltr.norm.StandardNormalizer",
+    "params" : { "avg":"42", "std":"6" }
+}
+</pre>
+ * <p>
+ * Example normalizations:
+ * <ul>
+ * <li>39 will be normalized to -0.5
+ * <li>42 will be normalized to  0
+ * <li>45 will be normalized to +0.5
+ * </ul>
+ */
+public class StandardNormalizer extends Normalizer {
+
+  private float avg = 0f;
+  private float std = 1f;
+
+  public float getAvg() {
+    return avg;
+  }
+
+  public void setAvg(float avg) {
+    this.avg = avg;
+  }
+
+  public float getStd() {
+    return std;
+  }
+
+  public void setStd(float std) {
+    this.std = std;
+  }
+
+  public void setAvg(String avg) {
+    this.avg = Float.parseFloat(avg);
+  }
+
+  public void setStd(String std) {
+    this.std = Float.parseFloat(std);
+  }
+
+  @Override
+  public float normalize(float value) {
+    return (value - avg) / std;
+  }
+  
+  @Override
+  protected void validate() throws NormalizerException {
+    super.validate();
+    if (std <= 0f) {
+      throw new NormalizerException("Standard Normalizer standard deviation must "
+          + "be positive | avg = " + avg + ", std = " + std);
+    }
+  }
+
+  @Override
+  public LinkedHashMap<String,Object> paramsToMap() {
+    final LinkedHashMap<String,Object> params = new LinkedHashMap<>(2, 1.0f);
+    params.put("avg", avg);
+    params.put("std", std);
+    return params;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder(64); // default initialCapacity of 16 won't be enough
+    sb.append(getClass().getSimpleName()).append('(');
+    sb.append("avg=").append(avg);
+    sb.append(",std=").append(avg).append(')');
+    return sb.toString();
+  }
+
+}
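
And the matching check for the avg/std examples above (avg=42, std=6), again
assuming the patch compiles as posted (StandardCheck is a made-up name):

    import org.wikimedia.search.extra.ltr.norm.StandardNormalizer;

    public class StandardCheck {
      public static void main(String[] args) {
        StandardNormalizer n = new StandardNormalizer();
        n.setAvg(42f);
        n.setStd(6f);
        System.out.println(n.normalize(39f)); // -0.5
        System.out.println(n.normalize(42f)); //  0.0
        System.out.println(n.normalize(45f)); //  0.5
      }
    }
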
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/norm/package-info.java b/src/main/java/org/wikimedia/search/extra/ltr/norm/package-info.java
new file mode 100644
index 0000000..246d101
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/norm/package-info.java
@@ -0,0 +1 @@
+package org.wikimedia.search.extra.ltr.norm;
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/package-info.java b/src/main/java/org/wikimedia/search/extra/ltr/package-info.java
new file mode 100644
index 0000000..531ba57
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/package-info.java
@@ -0,0 +1 @@
+package org.wikimedia.search.extra.ltr;
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/store/FeatureStore.java b/src/main/java/org/wikimedia/search/extra/ltr/store/FeatureStore.java
new file mode 100644
index 0000000..e6a6ab2
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/store/FeatureStore.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.store;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+
+import org.wikimedia.search.extra.ltr.feature.Feature;
+import org.wikimedia.search.extra.ltr.feature.FeatureException;
+
+public class FeatureStore {
+
+  /** the name of the default feature store **/
+  public static final String DEFAULT_FEATURE_STORE_NAME = "_DEFAULT_";
+
+  // LinkedHashMap because we need predictable iteration order
+  private final LinkedHashMap<String,Feature> store = new LinkedHashMap<>();
+  private final String name;
+
+  public FeatureStore(String name) {
+    this.name = name;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public Feature get(String name) {
+    return store.get(name);
+  }
+
+  public int size() {
+    return store.size();
+  }
+
+  @SuppressWarnings("unused")
+  public boolean containsFeature(String name) {
+    return store.containsKey(name);
+  }
+
+  public void add(Feature feature) {
+    final String name = feature.getName();
+    if (store.containsKey(name)) {
+      throw new FeatureException(name
+          + " already contained in the store, please use a different name");
+    }
+    feature.setIndex(store.size());
+    store.put(name, feature);
+  }
+
+  public List<Feature> getFeatures() {
+    final List<Feature> storeValues = new ArrayList<Feature>(store.values());
+    return Collections.unmodifiableList(storeValues);
+  }
+
+  public void clear() {
+    store.clear();
+  }
+
+  @Override
+  public String toString() {
+    return "FeatureStore [features=" + store.keySet() + "]";
+  }
+
+}
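
Store usage in miniature; a sketch that assumes Feature's (name, params)
constructor and setIndex() from earlier in this patch simply record their
arguments (StoreUsage is a made-up name):

    import java.util.Collections;

    import org.wikimedia.search.extra.ltr.feature.ValueFeature;
    import org.wikimedia.search.extra.ltr.store.FeatureStore;

    public class StoreUsage {
      public static void main(String[] args) {
        FeatureStore store = new FeatureStore(FeatureStore.DEFAULT_FEATURE_STORE_NAME);
        store.add(new ValueFeature("userFromMobile", Collections.<String,Object>emptyMap()));
        System.out.println(store.containsFeature("userFromMobile")); // true
        System.out.println(store.size());                            // 1
        // Adding the same name again throws FeatureException:
        // "userFromMobile already contained in the store, please use a different name"
        store.add(new ValueFeature("userFromMobile", Collections.<String,Object>emptyMap()));
      }
    }
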
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/store/LTRStoreService.java b/src/main/java/org/wikimedia/search/extra/ltr/store/LTRStoreService.java
new file mode 100644
index 0000000..767a3b6
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/store/LTRStoreService.java
@@ -0,0 +1,43 @@
+package org.wikimedia.search.extra.ltr.store;
+
+import java.util.HashMap;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.settings.Settings;
+
+public class LTRStoreService extends AbstractLifecycleComponent<LTRStoreService> {
+    private final ModelStore modelStore;
+    private final HashMap<String, FeatureStore> featureStores;
+
+    public LTRStoreService(Settings settings) {
+        super(settings);
+        modelStore = new ModelStore();
+        featureStores = new HashMap<>();
+        FeatureStore defaultFeatureStore = new FeatureStore(FeatureStore.DEFAULT_FEATURE_STORE_NAME);
+        featureStores.put(defaultFeatureStore.getName(), defaultFeatureStore);
+    }
+
+    @Override
+    protected void doStart() throws ElasticsearchException {
+    }
+
+    @Override
+    protected void doStop() throws ElasticsearchException {
+    }
+
+    @Override
+    protected void doClose() throws ElasticsearchException {
+    }
+
+    public ModelStore getModelStore() {
+        return modelStore;
+    }
+
+    public FeatureStore getFeatureStore(String name) {
+        if (!featureStores.containsKey(name)) {
+            throw new ElasticsearchException("Unknown feature store " + name);
+        }
+        return featureStores.get(name);
+    }
+}
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/store/ModelStore.java b/src/main/java/org/wikimedia/search/extra/ltr/store/ModelStore.java
new file mode 100644
index 0000000..7765a49
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/store/ModelStore.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.wikimedia.search.extra.ltr.store;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.wikimedia.search.extra.ltr.model.LTRScoringModel;
+import org.wikimedia.search.extra.ltr.model.ModelException;
+
+/**
+ * Contains the model and features declared.
+ */
+public class ModelStore {
+
+  private final Map<String,LTRScoringModel> availableModels;
+
+  public ModelStore() {
+    availableModels = new HashMap<>();
+  }
+
+  public synchronized LTRScoringModel getModel(String name) {
+    return availableModels.get(name);
+  }
+
+  public boolean containsModel(String modelName) {
+    return availableModels.containsKey(modelName);
+  }
+
+  public void clear() {
+    availableModels.clear();
+  }
+
+  public int size() {
+    return availableModels.size();
+  }
+
+  public List<LTRScoringModel> getModels() {
+    final List<LTRScoringModel> availableModelsValues =
+        new ArrayList<LTRScoringModel>(availableModels.values());
+    return Collections.unmodifiableList(availableModelsValues);
+  }
+
+  @Override
+  public String toString() {
+    return "ModelStore [availableModels=" + availableModels.keySet() + "]";
+  }
+
+  public void delete(String modelName) {
+    availableModels.remove(modelName);
+  }
+
+  public synchronized void addModel(LTRScoringModel modeldata)
+      throws ModelException {
+    final String name = modeldata.getName();
+
+    if (containsModel(name)) {
+      throw new ModelException("model '" + name
+          + "' already exists. Please use a different name");
+    }
+
+    availableModels.put(modeldata.getName(), modeldata);
+  }
+
+}
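
Registration semantics for the model store, sketched with a throwaway
anonymous model (ModelStoreUsage is a made-up name). All list arguments are
empty for brevity; note that addModel(), unlike LTRScoringModel.getInstance(),
never calls validate(), so the empty stub is accepted:

    import java.util.Collections;
    import java.util.List;

    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.Explanation;
    import org.wikimedia.search.extra.ltr.feature.Feature;
    import org.wikimedia.search.extra.ltr.model.LTRScoringModel;
    import org.wikimedia.search.extra.ltr.norm.Normalizer;
    import org.wikimedia.search.extra.ltr.store.FeatureStore;
    import org.wikimedia.search.extra.ltr.store.ModelStore;

    public class ModelStoreUsage {
      public static void main(String[] args) {
        LTRScoringModel stub = new LTRScoringModel("stub",
            Collections.<Feature>emptyList(), Collections.<Normalizer>emptyList(),
            FeatureStore.DEFAULT_FEATURE_STORE_NAME,
            Collections.<Feature>emptyList(),
            Collections.<String,Object>emptyMap()) {
          @Override
          public float score(float[] modelFeatureValuesNormalized) {
            return 0f;
          }
          @Override
          public Explanation explain(LeafReaderContext context, int doc,
              float finalScore, List<Explanation> featureExplanations) {
            return Explanation.match(finalScore, "stub");
          }
        };
        ModelStore store = new ModelStore();
        store.addModel(stub);
        System.out.println(store.containsModel("stub")); // true
        store.addModel(stub); // throws ModelException: model 'stub' already exists
      }
    }
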
diff --git a/src/main/java/org/wikimedia/search/extra/ltr/store/package-info.java b/src/main/java/org/wikimedia/search/extra/ltr/store/package-info.java
new file mode 100644
index 0000000..36a8ec1
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/ltr/store/package-info.java
@@ -0,0 +1 @@
+package org.wikimedia.search.extra.ltr.store;

-- 
To view, visit https://gerrit.wikimedia.org/r/324852
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I00c499c207ba7de05b396c928d7c1c172ac72431
Gerrit-PatchSet: 1
Gerrit-Project: search/extra
Gerrit-Branch: master
Gerrit-Owner: EBernhardson <ebernhard...@wikimedia.org>
