cpoerschke commented on a change in pull request #189:
URL: https://github.com/apache/solr/pull/189#discussion_r657317494



##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
##########
@@ -50,50 +49,52 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * The ranking query that is run, reranking results using the
- * LTRScoringModel algorithm
- */
+/** The ranking query that is run, reranking results using the LTRScoringModel 
algorithm */
 public class LTRScoringQuery extends Query implements Accountable {
 
   private static final Logger log = 
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  private static final long BASE_RAM_BYTES = 
RamUsageEstimator.shallowSizeOfInstance(LTRScoringQuery.class);
+  private static final long BASE_RAM_BYTES =
+      RamUsageEstimator.shallowSizeOfInstance(LTRScoringQuery.class);
 
   // contains a description of the model
-  final private LTRScoringModel ltrScoringModel;
-  final private boolean extractAllFeatures;
-  final private LTRThreadModule ltrThreadMgr;
-  final private Semaphore querySemaphore; // limits the number of threads per 
query, so that multiple requests can be serviced simultaneously
+  private final LTRScoringModel ltrScoringModel;
+  private final boolean extractAllFeatures;
+  private final LTRThreadModule ltrThreadMgr;
+  private final Semaphore
+      querySemaphore; // limits the number of threads per query, so that 
multiple requests can be

Review comment:
      Perhaps moving the comment to a separate line (above the field) would be clearer here: the formatter has split the declaration across lines and wrapped the comment mid-sentence, which makes both harder to read.

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
##########
@@ -460,23 +476,24 @@ public Explanation explain(LeafReaderContext context, int 
doc)
       final float finalScore = bs.score();
 
       return ltrScoringModel.explain(context, doc, finalScore, 
featureExplanations);
-
     }
 
     protected void reset() {
-      for (int i = 0; i < extractedFeatureWeights.length;++i){
+      for (int i = 0; i < extractedFeatureWeights.length; ++i) {
         int featId = extractedFeatureWeights[i].getIndex();
         float value = extractedFeatureWeights[i].getDefaultValue();
-        featuresInfo[featId].setValue(value); // need to set default value 
everytime as the default value is used in 'dense' mode even if used=false
+        featuresInfo[featId].setValue(
+            value); // need to set default value everytime as the default 
value is used in 'dense'
+        // mode even if used=false

Review comment:
       perhaps moving the comment to a separate line would be clearer here too.

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/response/transform/LTRFeatureLoggerTransformerFactory.java
##########
@@ -210,99 +209,120 @@ public void setContext(ResultContext context) {
 
       searcher = context.getSearcher();
       if (searcher == null) {
-        throw new SolrException(
-            SolrException.ErrorCode.BAD_REQUEST,
-            "searcher is null");
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "searcher 
is null");
       }
       leafContexts = searcher.getTopReaderContext().leaves();
       if (threadManager != null) {
-        
threadManager.setExecutor(context.getRequest().getCore().getCoreContainer().getUpdateShardHandler().getUpdateExecutor());
+        threadManager.setExecutor(
+            context
+                .getRequest()
+                .getCore()
+                .getCoreContainer()
+                .getUpdateShardHandler()
+                .getUpdateExecutor());
       }
 
       rerankingQueriesFromContext = 
SolrQueryRequestContextUtils.getScoringQueries(req);
-      docsWereNotReranked = (rerankingQueriesFromContext == null || 
rerankingQueriesFromContext.length == 0);
+      docsWereNotReranked =
+          (rerankingQueriesFromContext == null || 
rerankingQueriesFromContext.length == 0);
       String transformerFeatureStore = 
SolrQueryRequestContextUtils.getFvStoreName(req);
-      Map<String, String[]> transformerExternalFeatureInfo = 
LTRQParserPlugin.extractEFIParams(localparams);
+      Map<String, String[]> transformerExternalFeatureInfo =
+          LTRQParserPlugin.extractEFIParams(localparams);
 
       final LoggingModel loggingModel = 
createLoggingModel(transformerFeatureStore);
-      setupRerankingQueriesForLogging(transformerFeatureStore, 
transformerExternalFeatureInfo, loggingModel);
+      setupRerankingQueriesForLogging(
+          transformerFeatureStore, transformerExternalFeatureInfo, 
loggingModel);
       setupRerankingWeightsForLogging(context);
     }
 
     /**
-     * The loggingModel is an empty model that is just used to extract the 
features
-     * and log them
+     * The loggingModel is an empty model that is just used to extract the 
features and log them
+     *
      * @param transformerFeatureStore the explicit transformer feature store
      */
     private LoggingModel createLoggingModel(String transformerFeatureStore) {
       final ManagedFeatureStore fr = 
ManagedFeatureStore.getManagedFeatureStore(req.getCore());
 
       final FeatureStore store = fr.getFeatureStore(transformerFeatureStore);
-      transformerFeatureStore = store.getName(); // if transformerFeatureStore 
was null before this gets actual name
+      transformerFeatureStore =
+          store.getName(); // if transformerFeatureStore was null before this 
gets actual name
 
-      return new LoggingModel(loggingModelName,
-          transformerFeatureStore, store.getFeatures());
+      return new LoggingModel(loggingModelName, transformerFeatureStore, 
store.getFeatures());
     }
 
     /**
      * When preparing the reranking queries for logging features various 
scenarios apply:
-     * 
-     * No Reranking 
-     * There is the need of a logger model from the default feature store or 
the explicit feature store passed
-     * to extract the feature vector
-     * 
-     * Re Ranking
-     * 1) If no explicit feature store is passed, the models for each 
reranking query can be safely re-used
-     * the feature vector can be fetched from the feature vector cache.
-     * 2) If an explicit feature store is passed, and no reranking query uses 
a model with that feature store,
-     * There is the need of a logger model to extract the feature vector
-     * 3) If an explicit feature store is passed, and there is a reranking 
query that uses a model with that feature store,
-     * the model can be re-used and there is no need for a logging model
+     *
+     * <p>No Reranking There is the need of a logger model from the default 
feature store or the
+     * explicit feature store passed to extract the feature vector
+     *
+     * <p>Re Ranking 1) If no explicit feature store is passed, the models for 
each reranking query
+     * can be safely re-used the feature vector can be fetched from the 
feature vector cache. 2) If
+     * an explicit feature store is passed, and no reranking query uses a 
model with that feature
+     * store, There is the need of a logger model to extract the feature 
vector 3) If an explicit
+     * feature store is passed, and there is a reranking query that uses a 
model with that feature
+     * store, the model can be re-used and there is no need for a logging model

Review comment:
      This comment auto-reformatting impairs content clarity: the original's list structure (the "No Reranking" case, then "Re Ranking" scenarios 1), 2), 3)) has been collapsed into run-on paragraphs, so the distinct scenarios are hard to tell apart. Perhaps restore the structure with explicit HTML (e.g. a `<p>` per case or a `<ol>` list) so the formatter preserves it.

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/OriginalScoreFeature.java
##########
@@ -28,48 +27,56 @@
 import org.apache.lucene.search.Weight;
 import org.apache.solr.ltr.DocInfo;
 import org.apache.solr.request.SolrQueryRequest;
+
 /**
- * This feature returns the original score that the document had before 
performing
- * the reranking.
+ * This feature returns the original score that the document had before 
performing the reranking.
  * Example configuration:
+ *
  * <pre>{
-  "name":  "originalScore",
-  "class": "org.apache.solr.ltr.feature.OriginalScoreFeature",
-  "params": { }
-}</pre>
- **/
+ * "name":  "originalScore",
+ * "class": "org.apache.solr.ltr.feature.OriginalScoreFeature",
+ * "params": { }
+ * }</pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
##########
@@ -209,96 +209,108 @@ public ModelWeight createWeight(IndexSearcher searcher, 
ScoreMode scoreMode, flo
     Collection<Feature> features = null;
     if (this.extractAllFeatures) {
       features = allFeatures;
+    } else {
+      features = modelFeatures;
     }
-    else{
-      features =  modelFeatures;
-    }
-    final Feature.FeatureWeight[] extractedFeatureWeights = new 
Feature.FeatureWeight[features.size()];
+    final Feature.FeatureWeight[] extractedFeatureWeights =
+        new Feature.FeatureWeight[features.size()];
     final Feature.FeatureWeight[] modelFeaturesWeights = new 
Feature.FeatureWeight[modelFeatSize];
-    List<Feature.FeatureWeight > featureWeights = new 
ArrayList<>(features.size());
+    List<Feature.FeatureWeight> featureWeights = new 
ArrayList<>(features.size());
 
     if (querySemaphore == null) {
       createWeights(searcher, scoreMode.needsScores(), featureWeights, 
features);
-    }
-    else{
+    } else {
       createWeightsParallel(searcher, scoreMode.needsScores(), featureWeights, 
features);
     }
-    int i=0, j = 0;
+    int i = 0, j = 0;
     if (this.extractAllFeatures) {
       for (final Feature.FeatureWeight fw : featureWeights) {
         extractedFeatureWeights[i++] = fw;
       }
-      for (final Feature f : modelFeatures){
-        modelFeaturesWeights[j++] = extractedFeatureWeights[f.getIndex()]; // 
we can lookup by featureid because all features will be extracted when 
this.extractAllFeatures is set
+      for (final Feature f : modelFeatures) {
+        modelFeaturesWeights[j++] =
+            extractedFeatureWeights[
+                f.getIndex()]; // we can lookup by featureid because all 
features will be
+        // extracted when this.extractAllFeatures is set

Review comment:
       perhaps moving the comment to a separate line would be clearer here too.

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
##########
@@ -209,96 +209,108 @@ public ModelWeight createWeight(IndexSearcher searcher, 
ScoreMode scoreMode, flo
     Collection<Feature> features = null;
     if (this.extractAllFeatures) {
       features = allFeatures;
+    } else {
+      features = modelFeatures;
     }
-    else{
-      features =  modelFeatures;
-    }
-    final Feature.FeatureWeight[] extractedFeatureWeights = new 
Feature.FeatureWeight[features.size()];
+    final Feature.FeatureWeight[] extractedFeatureWeights =
+        new Feature.FeatureWeight[features.size()];
     final Feature.FeatureWeight[] modelFeaturesWeights = new 
Feature.FeatureWeight[modelFeatSize];
-    List<Feature.FeatureWeight > featureWeights = new 
ArrayList<>(features.size());
+    List<Feature.FeatureWeight> featureWeights = new 
ArrayList<>(features.size());
 
     if (querySemaphore == null) {
       createWeights(searcher, scoreMode.needsScores(), featureWeights, 
features);
-    }
-    else{
+    } else {
       createWeightsParallel(searcher, scoreMode.needsScores(), featureWeights, 
features);
     }
-    int i=0, j = 0;
+    int i = 0, j = 0;
     if (this.extractAllFeatures) {
       for (final Feature.FeatureWeight fw : featureWeights) {
         extractedFeatureWeights[i++] = fw;
       }
-      for (final Feature f : modelFeatures){
-        modelFeaturesWeights[j++] = extractedFeatureWeights[f.getIndex()]; // 
we can lookup by featureid because all features will be extracted when 
this.extractAllFeatures is set
+      for (final Feature f : modelFeatures) {
+        modelFeaturesWeights[j++] =
+            extractedFeatureWeights[
+                f.getIndex()]; // we can lookup by featureid because all 
features will be
+        // extracted when this.extractAllFeatures is set
       }
-    }
-    else{
-      for (final Feature.FeatureWeight fw: featureWeights){
+    } else {
+      for (final Feature.FeatureWeight fw : featureWeights) {
         extractedFeatureWeights[i++] = fw;
         modelFeaturesWeights[j++] = fw;
       }
     }
     return new ModelWeight(modelFeaturesWeights, extractedFeatureWeights, 
allFeatures.size());
   }
 
-  private void createWeights(IndexSearcher searcher, boolean needsScores,
-      List<Feature.FeatureWeight > featureWeights, Collection<Feature> 
features) throws IOException {
+  private void createWeights(
+      IndexSearcher searcher,
+      boolean needsScores,
+      List<Feature.FeatureWeight> featureWeights,
+      Collection<Feature> features)
+      throws IOException {
     final SolrQueryRequest req = getRequest();
     // since the feature store is a linkedhashmap order is preserved
     for (final Feature f : features) {
-      try{
+      try {
         Feature.FeatureWeight fw = f.createWeight(searcher, needsScores, req, 
originalQuery, efi);
         featureWeights.add(fw);
       } catch (final Exception e) {
-        throw new RuntimeException("Exception from createWeight for " + 
f.toString() + " "
-            + e.getMessage(), e);
+        throw new RuntimeException(
+            "Exception from createWeight for " + f.toString() + " " + 
e.getMessage(), e);
       }
     }
   }
 
-  private class CreateWeightCallable implements 
Callable<Feature.FeatureWeight>{
-    final private Feature f;
-    final private IndexSearcher searcher;
-    final private boolean needsScores;
-    final private SolrQueryRequest req;
+  private class CreateWeightCallable implements 
Callable<Feature.FeatureWeight> {
+    private final Feature f;
+    private final IndexSearcher searcher;
+    private final boolean needsScores;
+    private final SolrQueryRequest req;
 
-    public CreateWeightCallable(Feature f, IndexSearcher searcher, boolean 
needsScores, SolrQueryRequest req){
+    public CreateWeightCallable(
+        Feature f, IndexSearcher searcher, boolean needsScores, 
SolrQueryRequest req) {
       this.f = f;
       this.searcher = searcher;
       this.needsScores = needsScores;
       this.req = req;
     }
 
     @Override
-    public Feature.FeatureWeight call() throws Exception{
+    public Feature.FeatureWeight call() throws Exception {
       try {
-        Feature.FeatureWeight fw  = f.createWeight(searcher, needsScores, req, 
originalQuery, efi);
+        Feature.FeatureWeight fw = f.createWeight(searcher, needsScores, req, 
originalQuery, efi);
         return fw;
       } catch (final Exception e) {
-        throw new RuntimeException("Exception from createWeight for " + 
f.toString() + " "
-            + e.getMessage(), e);
+        throw new RuntimeException(
+            "Exception from createWeight for " + f.toString() + " " + 
e.getMessage(), e);
       } finally {
         querySemaphore.release();
         ltrThreadMgr.releaseLTRSemaphore();
       }
     }
   } // end of call CreateWeightCallable
 
-  private void createWeightsParallel(IndexSearcher searcher, boolean 
needsScores,
-      List<Feature.FeatureWeight > featureWeights, Collection<Feature> 
features) throws RuntimeException {
+  private void createWeightsParallel(
+      IndexSearcher searcher,
+      boolean needsScores,
+      List<Feature.FeatureWeight> featureWeights,
+      Collection<Feature> features)
+      throws RuntimeException {
 
     final SolrQueryRequest req = getRequest();
-    List<Future<Feature.FeatureWeight> > futures = new 
ArrayList<>(features.size());
-    try{
+    List<Future<Feature.FeatureWeight>> futures = new 
ArrayList<>(features.size());
+    try {
       for (final Feature f : features) {
         CreateWeightCallable callable = new CreateWeightCallable(f, searcher, 
needsScores, req);
         RunnableFuture<Feature.FeatureWeight> runnableFuture = new 
FutureTask<>(callable);
-        querySemaphore.acquire(); // always acquire before the ltrSemaphore is 
acquired, to guarantee a that the current query is within the limit for max. 
threads
-        ltrThreadMgr.acquireLTRSemaphore();//may block and/or interrupt
-        ltrThreadMgr.execute(runnableFuture);//releases semaphore when done
+        querySemaphore
+            .acquire(); // always acquire before the ltrSemaphore is acquired, 
to guarantee a that
+        // the current query is within the limit for max. threads

Review comment:
       perhaps moving the comment to a separate line would be clearer here too.

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/FieldValueFeature.java
##########
@@ -41,27 +40,29 @@
 import org.apache.solr.search.SolrIndexSearcher;
 
 /**
- * This feature returns the value of a field in the current document.
- * The field must have stored="true" or docValues="true" properties.
- * Example configuration:
+ * This feature returns the value of a field in the current document. The 
field must have
+ * stored="true" or docValues="true" properties. Example configuration:
+ *
  * <pre>{
-  "name":  "rawHits",
-  "class": "org.apache.solr.ltr.feature.FieldValueFeature",
-  "params": {
-      "field": "hits"
-  }
-}</pre>
+ * "name":  "rawHits",
+ * "class": "org.apache.solr.ltr.feature.FieldValueFeature",
+ * "params": {
+ * "field": "hits"
+ * }
+ * }</pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/FeatureLogger.java
##########
@@ -19,15 +19,18 @@
 import org.apache.solr.search.SolrIndexSearcher;
 
 /**
- * FeatureLogger can be registered in a model and provide a strategy for 
logging
- * the feature values.
+ * FeatureLogger can be registered in a model and provide a strategy for 
logging the feature values.
  */
 public abstract class FeatureLogger {
 
-  /** the name of the cache using for storing the feature value **/
+  /** the name of the cache using for storing the feature value * */

Review comment:
      This change surprises me: the original `**/` comment terminator has been reformatted to `* */`, which reads like an unintended formatter artifact rather than an improvement. Perhaps change the ending to the conventional `*/` so the formatter leaves it alone.

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/Feature.java
##########
@@ -37,63 +36,67 @@
 import org.apache.solr.util.SolrPluginUtils;
 
 /**
- * A recipe for computing a feature.  Subclass this for specialized feature 
calculations.
- * <p>
- * A feature consists of
+ * A recipe for computing a feature. Subclass this for specialized feature 
calculations.
+ *
+ * <p>A feature consists of
+ *
  * <ul>
- * <li> a name as the identifier
- * <li> parameters to represent the specific feature
+ *   <li>a name as the identifier
+ *   <li>parameters to represent the specific feature
  * </ul>
- * <p>
- * Example configuration (snippet):
+ *
+ * <p>Example configuration (snippet):
+ *
  * <pre>{
-   "class" : "...",
-   "name" : "myFeature",
-   "params" : {
-       ...
-   }
-}</pre>
- * <p>
- * {@link Feature} is an abstract class and concrete classes should implement
- * the {@link #validate()} function, and must implement the {@link 
#paramsToMap()}
- * and createWeight() methods.
+ * "class" : "...",
+ * "name" : "myFeature",
+ * "params" : {
+ * ...
+ * }
+ * }</pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/ValueFeature.java
##########
@@ -19,31 +19,31 @@
 import java.io.IOException;
 import java.util.LinkedHashMap;
 import java.util.Map;
-
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.solr.request.SolrQueryRequest;
+
 /**
  * This feature allows to return a constant given value for the current 
document.
  *
- * Example configuration:
+ * <p>Example configuration:
+ *
  * <pre>{
-   "name" : "userFromMobile",
-   "class" : "org.apache.solr.ltr.feature.ValueFeature",
-   "params" : { "value" : "${userFromMobile}", "required":true }
- }</pre>
+ * "name" : "userFromMobile",
+ * "class" : "org.apache.solr.ltr.feature.ValueFeature",
+ * "params" : { "value" : "${userFromMobile}", "required":true }
+ * }</pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
##########
@@ -420,16 +437,16 @@ private void setFeaturesInfo(){
     }
 
     /**
-     * Goes through all the stored feature values, and calculates the 
normalized
-     * values for all the features that will be used for scoring.
-     * Then calculate and return the model's score.
+     * Goes through all the stored feature values, and calculates the 
normalized values for all the
+     * features that will be used for scoring. Then calculate and return the 
model's score.
      */
     private float makeNormalizedFeaturesAndScore() {
       int pos = 0;
       for (final Feature.FeatureWeight feature : modelFeatureWeights) {
         final int featureId = feature.getIndex();
         FeatureInfo fInfo = featuresInfo[featureId];
-        if (fInfo.isUsed()) { // not checking for finfo == null as that would 
be a bug we should catch
+        if (fInfo
+            .isUsed()) { // not checking for finfo == null as that would be a 
bug we should catch

Review comment:
       perhaps moving the comment to a separate line would be clearer here too.

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRThreadModule.java
##########
@@ -28,35 +27,38 @@
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
 
 /**
- * The LTRThreadModule is optionally used by the {@link 
org.apache.solr.ltr.search.LTRQParserPlugin} and
- * {@link 
org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory 
LTRFeatureLoggerTransformerFactory}
- * classes to parallelize the creation of {@link 
org.apache.solr.ltr.feature.Feature.FeatureWeight Feature.FeatureWeight}
- * objects.
- * <p>
- * Example configuration:
+ * The LTRThreadModule is optionally used by the {@link 
org.apache.solr.ltr.search.LTRQParserPlugin}
+ * and {@link 
org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory
+ * LTRFeatureLoggerTransformerFactory} classes to parallelize the creation of 
{@link
+ * org.apache.solr.ltr.feature.Feature.FeatureWeight Feature.FeatureWeight} 
objects.
+ *
+ * <p>Example configuration:
+ *
  * <pre>
-  &lt;queryParser name="ltr" 
class="org.apache.solr.ltr.search.LTRQParserPlugin"&gt;
-     &lt;int name="threadModule.totalPoolThreads"&gt;10&lt;/int&gt;
-     &lt;int name="threadModule.numThreadsPerRequest"&gt;5&lt;/int&gt;
-  &lt;/queryParser&gt;
-
-  &lt;transformer name="features" 
class="org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory"&gt;
-     &lt;int name="threadModule.totalPoolThreads"&gt;10&lt;/int&gt;
-     &lt;int name="threadModule.numThreadsPerRequest"&gt;5&lt;/int&gt;
-  &lt;/transformer&gt;
-</pre>
- * If an individual solr instance is expected to receive no more than one 
query at a time, it is best
- * to set <code>totalPoolThreads</code> and <code>numThreadsPerRequest</code> 
to the same value.
+ * &lt;queryParser name="ltr" 
class="org.apache.solr.ltr.search.LTRQParserPlugin"&gt;
+ * &lt;int name="threadModule.totalPoolThreads"&gt;10&lt;/int&gt;
+ * &lt;int name="threadModule.numThreadsPerRequest"&gt;5&lt;/int&gt;
+ * &lt;/queryParser&gt;
+ *
+ * &lt;transformer name="features" 
class="org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory"&gt;
+ * &lt;int name="threadModule.totalPoolThreads"&gt;10&lt;/int&gt;
+ * &lt;int name="threadModule.numThreadsPerRequest"&gt;5&lt;/int&gt;
+ * &lt;/transformer&gt;
+ * </pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/SolrFeature.java
##########
@@ -41,22 +40,23 @@
 import org.apache.solr.search.SyntaxError;
 
 /**
- * This feature allows you to reuse any Solr query as a feature. The value
- * of the feature will be the score of the given query for the current 
document.
- * See <a href="https://lucene.apache.org/solr/guide/other-parsers.html";>Solr 
documentation of other parsers</a> you can use as a feature.
- * Example configurations:
+ * This feature allows you to reuse any Solr query as a feature. The value of 
the feature will be
+ * the score of the given query for the current document. See <a
+ * href="https://lucene.apache.org/solr/guide/other-parsers.html";>Solr 
documentation of other
+ * parsers</a> you can use as a feature. Example configurations:
+ *
  * <pre>[{ "name": "isBook",
-  "class": "org.apache.solr.ltr.feature.SolrFeature",
-  "params":{ "fq": ["{!terms f=category}book"] }
-},
-{
-  "name":  "documentRecency",
-  "class": "org.apache.solr.ltr.feature.SolrFeature",
-  "params": {
-      "q": "{!func}recip( ms(NOW,publish_date), 3.16e-11, 1, 1)"
-  }
-}]</pre>
- **/
+ * "class": "org.apache.solr.ltr.feature.SolrFeature",
+ * "params":{ "fq": ["{!terms f=category}book"] }
+ * },
+ * {
+ * "name":  "documentRecency",
+ * "class": "org.apache.solr.ltr.feature.SolrFeature",
+ * "params": {
+ * "q": "{!func}recip( ms(NOW,publish_date), 3.16e-11, 1, 1)"
+ * }
+ * }]</pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/FieldLengthFeature.java
##########
@@ -28,20 +27,23 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.SmallFloat;
 import org.apache.solr.request.SolrQueryRequest;
+
 /**
- * This feature returns the length of a field (in terms) for the current 
document.
- * Example configuration:
+ * This feature returns the length of a field (in terms) for the current 
document. Example
+ * configuration:
+ *
  * <pre>{
-  "name":  "titleLength",
-  "class": "org.apache.solr.ltr.feature.FieldLengthFeature",
-  "params": {
-      "field": "title"
-  }
-}</pre>
- * Note: since this feature relies on norms values that are stored in a single 
byte
- * the value of the feature could have a lightly different value.
- * (see also {@link org.apache.lucene.search.similarities.ClassicSimilarity})
- **/
+ * "name":  "titleLength",
+ * "class": "org.apache.solr.ltr.feature.FieldLengthFeature",
+ * "params": {
+ * "field": "title"
+ * }
+ * }</pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/model/WrapperModel.java
##########
@@ -29,26 +28,26 @@
 /**
  * A scoring model that wraps the other model.
  *
- * <p>This model loads a model from an external resource during the 
initialization.
- * The way of fetching the wrapped model is depended on
- * the implementation of {@link WrapperModel#fetchModelMap()}.
+ * <p>This model loads a model from an external resource during the 
initialization. The way of
+ * fetching the wrapped model is depended on the implementation of {@link
+ * WrapperModel#fetchModelMap()}.
  *
- * <p>This model doesn't hold the actual parameters of the wrapped model,
- * thus it can manage large models which are difficult to upload to ZooKeeper.
+ * <p>This model doesn't hold the actual parameters of the wrapped model, thus 
it can manage large
+ * models which are difficult to upload to ZooKeeper.
  *
  * <p>Example configuration:
+ *
  * <pre>{

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/model/MultipleAdditiveTreesModel.java
##########
@@ -21,99 +21,102 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Stack;
-
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.Explanation;
 import org.apache.solr.ltr.feature.Feature;
 import org.apache.solr.ltr.norm.Normalizer;
 import org.apache.solr.util.SolrPluginUtils;
 
 /**
- * A scoring model that computes scores based on the summation of multiple 
weighted trees.
- * Example models are LambdaMART and Gradient Boosted Regression Trees (GBRT) .
- * <p>
- * Example configuration:
-<pre>{
-   "class" : "org.apache.solr.ltr.model.MultipleAdditiveTreesModel",
-   "name" : "multipleadditivetreesmodel",
-   "features":[
-       { "name" : "userTextTitleMatch"},
-       { "name" : "originalScore"}
-   ],
-   "params" : {
-       "trees" : [
-           {
-               "weight" : "1",
-               "root": {
-                   "feature" : "userTextTitleMatch",
-                   "threshold" : "0.5",
-                   "left" : {
-                       "value" : "-100"
-                   },
-                   "right" : {
-                       "feature" : "originalScore",
-                       "threshold" : "10.0",
-                       "left" : {
-                           "value" : "50"
-                       },
-                       "right" : {
-                           "value" : "75"
-                       }
-                   }
-               }
-           },
-           {
-               "weight" : "2",
-               "root" : {
-                   "value" : "-10"
-               }
-           }
-       ]
-   }
-}</pre>
- * <p>
- * Training libraries:
+ * A scoring model that computes scores based on the summation of multiple 
weighted trees. Example
+ * models are LambdaMART and Gradient Boosted Regression Trees (GBRT) .
+ *
+ * <p>Example configuration:
+ *
+ * <pre>{

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/interleaving/algorithms/TeamDraftInterleaving.java
##########
@@ -57,20 +57,18 @@
   }
 
   /**
-   * Team Draft Interleaving considers two ranking models: modelA and modelB.
-   * For a given query, each model returns its ranked list of documents La = 
(a1,a2,...) and Lb = (b1, b2, ...).
-   * The algorithm creates a unique ranked list I = (i1, i2, ...).
-   * This list is created by interleaving elements from the two lists la and 
lb as described by Chapelle et al.[1].
-   * Each element Ij is labelled TeamA if it is selected from La and TeamB if 
it is selected from Lb.
-   * <p>
-   * [1] O. Chapelle, T. Joachims, F. Radlinski, and Y. Yue.
-   * Large-scale validation and analysis of interleaved search evaluation. ACM 
TOIS, 30(1):1–41, Feb. (2012)
-   * <p>
-   * Assumptions:
-   * - rerankedA and rerankedB has the same length.
-   * They contains the same search results, ranked differently by two ranking 
models
-   * - each reranked list can not contain the same search result more than 
once.
-   * - results are all from the same shard
+   * Team Draft Interleaving considers two ranking models: modelA and modelB. 
For a given query,
+   * each model returns its ranked list of documents La = (a1,a2,...) and Lb = 
(b1, b2, ...). The
+   * algorithm creates a unique ranked list I = (i1, i2, ...). This list is 
created by interleaving
+   * elements from the two lists la and lb as described by Chapelle et al.[1]. 
Each element Ij is
+   * labelled TeamA if it is selected from La and TeamB if it is selected from 
Lb.
+   *
+   * <p>[1] O. Chapelle, T. Joachims, F. Radlinski, and Y. Yue. Large-scale 
validation and analysis
+   * of interleaved search evaluation. ACM TOIS, 30(1):1–41, Feb. (2012)
+   *
+   * <p>Assumptions: - rerankedA and rerankedB has the same length. They 
contains the same search
+   * results, ranked differently by two ranking models - each reranked list 
can not contain the same
+   * search result more than once. - results are all from the same shard

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/model/DefaultWrapperModel.java
##########
@@ -35,32 +34,38 @@
 /**
  * A scoring model that fetches the wrapped model from {@link 
SolrResourceLoader}.
  *
- * <p>This model uses {@link SolrResourceLoader#openResource(String)} for 
fetching the wrapped model.
- * If you give a relative path for {@code params/resource}, this model will 
try to load the wrapped model from
- * the instance directory (i.e. ${solr.solr.home}). Otherwise, seek through 
classpaths.
+ * <p>This model uses {@link SolrResourceLoader#openResource(String)} for 
fetching the wrapped
+ * model. If you give a relative path for {@code params/resource}, this model 
will try to load the
+ * wrapped model from the instance directory (i.e. ${solr.solr.home}). 
Otherwise, seek through
+ * classpaths.
  *
  * <p>Example configuration:
+ *
  * <pre>{
-  "class": "org.apache.solr.ltr.model.DefaultWrapperModel",
-  "name": "myWrapperModelName",
-  "params": {
-    "resource": "models/myModel.json"
-  }
-}</pre>
+ * "class": "org.apache.solr.ltr.model.DefaultWrapperModel",
+ * "name": "myWrapperModelName",
+ * "params": {
+ * "resource": "models/myModel.json"
+ * }
+ * }</pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/norm/MinMaxNormalizer.java
##########
@@ -20,19 +20,22 @@
 
 /**
  * A Normalizer to scale a feature value using a (min,max) range.
- * <p>
- * Example configuration:
-<pre>
-"norm" : {
-    "class" : "org.apache.solr.ltr.norm.MinMaxNormalizer",
-    "params" : { "min":"0", "max":"50" }
-}
-</pre>
+ *
+ * <p>Example configuration:
+ *
+ * <pre>

Review comment:
       note to self: check this is still rendered correctly

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/model/LinearModel.java
##########
@@ -19,79 +19,85 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.Explanation;
 import org.apache.solr.ltr.feature.Feature;
 import org.apache.solr.ltr.norm.Normalizer;
 
 /**
- * A scoring model that computes scores using a dot product.
- * Example models are RankSVM and Pranking.
- * <p>
- * Example configuration:
+ * A scoring model that computes scores using a dot product. Example models 
are RankSVM and
+ * Pranking.
+ *
+ * <p>Example configuration:
+ *
  * <pre>{

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/interleaving/algorithms/TeamDraftInterleaving.java
##########
@@ -22,31 +22,31 @@
 import java.util.LinkedList;
 import java.util.Random;
 import java.util.Set;
-
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.solr.ltr.interleaving.Interleaving;
 import org.apache.solr.ltr.interleaving.InterleavingResult;
 
 /**
- * Interleaving was introduced the first time by Joachims in [1, 2].
- * Team Draft Interleaving is among the most successful and used interleaving 
approaches[3].
- * Team Draft Interleaving implements a method similar to the way in which 
captains select their players in team-matches.
- * Team Draft Interleaving produces a fair distribution of ranking models’ 
elements in the final interleaved list.
- * "Team draft interleaving" has also proved to overcome an issue of the 
"Balanced interleaving" approach, in determining the winning model[4].
- * <p>
- * [1] T. Joachims. Optimizing search engines using clickthrough data. KDD 
(2002)
- * [2] 
T.Joachims.Evaluatingretrievalperformanceusingclickthroughdata.InJ.Franke, G. 
Nakhaeizadeh, and I. Renz, editors,
- * Text Mining, pages 79–96. Physica/Springer (2003)
- * [3] F. Radlinski, M. Kurup, and T. Joachims. How does clickthrough data 
reflect re-
- * trieval quality? In CIKM, pages 43–52. ACM Press (2008)
- * [4] O. Chapelle, T. Joachims, F. Radlinski, and Y. Yue.
- * Large-scale validation and analysis of interleaved search evaluation. ACM 
TOIS, 30(1):1–41, Feb. (2012)
+ * Interleaving was introduced the first time by Joachims in [1, 2]. Team 
Draft Interleaving is
+ * among the most successful and used interleaving approaches[3]. Team Draft 
Interleaving implements
+ * a method similar to the way in which captains select their players in 
team-matches. Team Draft
+ * Interleaving produces a fair distribution of ranking models’ elements in 
the final interleaved
+ * list. "Team draft interleaving" has also proved to overcome an issue of the 
"Balanced
+ * interleaving" approach, in determining the winning model[4].
+ *
+ * <p>[1] T. Joachims. Optimizing search engines using clickthrough data. KDD 
(2002) [2]
+ * T. Joachims. Evaluating retrieval performance using clickthrough data. In 
J. Franke, G. Nakhaeizadeh, and
+ * I. Renz, editors, Text Mining, pages 79–96. Physica/Springer (2003) [3] F. 
Radlinski, M. Kurup,
+ * and T. Joachims. How does clickthrough data reflect retrieval quality? In 
CIKM, pages 43–52.
+ * ACM Press (2008) [4] O. Chapelle, T. Joachims, F. Radlinski, and Y. Yue. 
Large-scale validation
+ * and analysis of interleaved search evaluation. ACM TOIS, 30(1):1–41, Feb. 
(2012)

Review comment:
       note to self: check this is still rendered correctly

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/package-info.java
##########
@@ -16,30 +16,24 @@
  */
 
 /**
- * <p>
- * This package contains the main logic for performing the reranking using
- * a Learning to Rank model.
- * </p>
- * <p>
- * A model will be applied on each document through a {@link 
org.apache.solr.ltr.LTRScoringQuery}, a
- * subclass of {@link org.apache.lucene.search.Query}. As a normal query,
- * the learned model will produce a new score
- * for each document reranked.
- * </p>
- * <p>
- * A {@link org.apache.solr.ltr.LTRScoringQuery} is created by providing an 
instance of
- * {@link org.apache.solr.ltr.model.LTRScoringModel}. An instance of
- * {@link org.apache.solr.ltr.model.LTRScoringModel}
- * defines how to combine the features in order to create a new
- * score for a document. A new Learning to Rank model is plugged
- * into the framework  by extending {@link 
org.apache.solr.ltr.model.LTRScoringModel},
- * (see for example {@link 
org.apache.solr.ltr.model.MultipleAdditiveTreesModel} and {@link 
org.apache.solr.ltr.model.LinearModel}).
- * </p>
- * <p>
- * The {@link org.apache.solr.ltr.LTRScoringQuery} will take care of computing 
the values of
- * all the features (see {@link org.apache.solr.ltr.feature.Feature}) and then 
will delegate the final score
- * generation to the {@link org.apache.solr.ltr.model.LTRScoringModel}, by 
calling the method
- * {@link org.apache.solr.ltr.model.LTRScoringModel#score(float[] 
modelFeatureValuesNormalized) score(float[] modelFeatureValuesNormalized)}.
- * </p>
+ * This package contains the main logic for performing the reranking using a 
Learning to Rank model.

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/model/NeuralNetworkModel.java
##########
@@ -28,75 +27,79 @@
 
 /**
  * A scoring model that computes document scores using a neural network.
- * <p>
- * Supported <a 
href="https://en.wikipedia.org/wiki/Activation_function";>activation 
functions</a> are:
- * <code>identity</code>, <code>relu</code>, <code>sigmoid</code>, 
<code>tanh</code>, <code>leakyrelu</code> and
- * contributions to support additional activation functions are welcome.
- * <p>
- * Example configuration:
-<pre>{

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/response/transform/LTRFeatureLoggerTransformerFactory.java
##########
@@ -162,31 +161,31 @@ private FeatureLogger createFeatureLogger(String 
formatStr) {
 
   class FeatureTransformer extends DocTransformer {
 
-    final private String name;
-    final private SolrParams localparams;
-    final private SolrQueryRequest req;
-    final private boolean hasExplicitFeatureStore;
+    private final String name;
+    private final SolrParams localparams;
+    private final SolrQueryRequest req;
+    private final boolean hasExplicitFeatureStore;
 
     private List<LeafReaderContext> leafContexts;
     private SolrIndexSearcher searcher;
     /**
-     * rerankingQueries, modelWeights have:
-     * length=1 - [Classic LTR] When reranking with a single model
-     * length=2 - [Interleaving] When reranking with interleaving (two ranking 
models are involved)
+     * rerankingQueries, modelWeights have: length=1 - [Classic LTR] When 
reranking with a single
+     * model length=2 - [Interleaving] When reranking with interleaving (two 
ranking models are
+     * involved)

Review comment:
       this comment auto-reformatting impairs content clarity.

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/model/LTRScoringModel.java
##########
@@ -38,71 +37,81 @@
 
 /**
  * A scoring model computes scores that can be used to rerank documents.
- * <p>
- * A scoring model consists of
+ *
+ * <p>A scoring model consists of
+ *
  * <ul>
- * <li> a list of features ({@link Feature}) and
- * <li> a list of normalizers ({@link Normalizer}) plus
- * <li> parameters or configuration to represent the scoring algorithm.
+ *   <li>a list of features ({@link Feature}) and
+ *   <li>a list of normalizers ({@link Normalizer}) plus
+ *   <li>parameters or configuration to represent the scoring algorithm.
  * </ul>
- * <p>
- * Example configuration (snippet):
+ *
+ * <p>Example configuration (snippet):
+ *
  * <pre>{

Review comment:
       note to self: check this is still rendered correctly

##########
File path: solr/contrib/ltr/src/java/org/apache/solr/ltr/store/FeatureStore.java
##########
@@ -20,16 +20,16 @@
 import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.List;
-
 import org.apache.solr.ltr.feature.Feature;
 import org.apache.solr.ltr.feature.FeatureException;
 
 public class FeatureStore {
 
-  /** the name of the default feature store **/
+  /** the name of the default feature store */
   public static final String DEFAULT_FEATURE_STORE_NAME = "_DEFAULT_";
 
-  private final LinkedHashMap<String,Feature> store = new LinkedHashMap<>(); 
// LinkedHashMap because we need predictable iteration order
+  private final LinkedHashMap<String, Feature> store =
+      new LinkedHashMap<>(); // LinkedHashMap because we need predictable 
iteration order

Review comment:
       perhaps moving the comment to a separate line would be clearer here too.

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/response/transform/LTRFeatureLoggerTransformerFactory.java
##########
@@ -51,18 +50,19 @@
 import org.apache.solr.util.SolrPluginUtils;
 
 /**

Review comment:
       note to self: check this is still rendered correctly

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/store/rest/ManagedFeatureStore.java
##########
@@ -170,41 +162,41 @@ public void doGet(BaseSolrResource endpoint, String 
childId) {
     } else {
       final FeatureStore store = getFeatureStore(childId);
       if (store == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "missing feature store [" + childId + "]");
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST, "missing feature store [" + 
childId + "]");
       }
-      response.add(FEATURES_JSON_FIELD,
-          featuresAsManagedResources(store));
+      response.add(FEATURES_JSON_FIELD, featuresAsManagedResources(store));
     }
   }
 
   private static List<Object> featuresAsManagedResources(FeatureStore store) {
     final List<Feature> storedFeatures = store.getFeatures();
     final List<Object> features = new ArrayList<Object>(storedFeatures.size());
     for (final Feature f : storedFeatures) {
-      final LinkedHashMap<String,Object> m = toFeatureMap(f);
+      final LinkedHashMap<String, Object> m = toFeatureMap(f);
       m.put(FEATURE_STORE_NAME_KEY, store.getName());
       features.add(m);
     }
     return features;
   }
 
-  private static LinkedHashMap<String,Object> toFeatureMap(Feature feat) {
-    final LinkedHashMap<String,Object> o = new LinkedHashMap<>(4, 1.0f); // 1 
extra for caller to add store
+  private static LinkedHashMap<String, Object> toFeatureMap(Feature feat) {
+    final LinkedHashMap<String, Object> o =
+        new LinkedHashMap<>(4, 1.0f); // 1 extra for caller to add store

Review comment:
       perhaps moving the comment to a separate line would be clearer here too.

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/response/transform/LTRFeatureLoggerTransformerFactory.java
##########
@@ -210,99 +209,120 @@ public void setContext(ResultContext context) {
 
       searcher = context.getSearcher();
       if (searcher == null) {
-        throw new SolrException(
-            SolrException.ErrorCode.BAD_REQUEST,
-            "searcher is null");
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "searcher 
is null");
       }
       leafContexts = searcher.getTopReaderContext().leaves();
       if (threadManager != null) {
-        
threadManager.setExecutor(context.getRequest().getCore().getCoreContainer().getUpdateShardHandler().getUpdateExecutor());
+        threadManager.setExecutor(
+            context
+                .getRequest()
+                .getCore()
+                .getCoreContainer()
+                .getUpdateShardHandler()
+                .getUpdateExecutor());
       }
 
       rerankingQueriesFromContext = 
SolrQueryRequestContextUtils.getScoringQueries(req);
-      docsWereNotReranked = (rerankingQueriesFromContext == null || 
rerankingQueriesFromContext.length == 0);
+      docsWereNotReranked =
+          (rerankingQueriesFromContext == null || 
rerankingQueriesFromContext.length == 0);
       String transformerFeatureStore = 
SolrQueryRequestContextUtils.getFvStoreName(req);
-      Map<String, String[]> transformerExternalFeatureInfo = 
LTRQParserPlugin.extractEFIParams(localparams);
+      Map<String, String[]> transformerExternalFeatureInfo =
+          LTRQParserPlugin.extractEFIParams(localparams);
 
       final LoggingModel loggingModel = 
createLoggingModel(transformerFeatureStore);
-      setupRerankingQueriesForLogging(transformerFeatureStore, 
transformerExternalFeatureInfo, loggingModel);
+      setupRerankingQueriesForLogging(
+          transformerFeatureStore, transformerExternalFeatureInfo, 
loggingModel);
       setupRerankingWeightsForLogging(context);
     }
 
     /**
-     * The loggingModel is an empty model that is just used to extract the 
features
-     * and log them
+     * The loggingModel is an empty model that is just used to extract the 
features and log them
+     *
      * @param transformerFeatureStore the explicit transformer feature store
      */
     private LoggingModel createLoggingModel(String transformerFeatureStore) {
       final ManagedFeatureStore fr = 
ManagedFeatureStore.getManagedFeatureStore(req.getCore());
 
       final FeatureStore store = fr.getFeatureStore(transformerFeatureStore);
-      transformerFeatureStore = store.getName(); // if transformerFeatureStore 
was null before this gets actual name
+      transformerFeatureStore =
+          store.getName(); // if transformerFeatureStore was null before this 
gets actual name

Review comment:
       perhaps moving the comment to a separate line would be clearer here too.

##########
File path: 
solr/contrib/ltr/src/java/org/apache/solr/ltr/norm/StandardNormalizer.java
##########
@@ -20,20 +20,22 @@
 
 /**
  * A Normalizer to scale a feature value around an 
average-and-standard-deviation distribution.
- * <p>
- * Example configuration:
-<pre>
-"norm" : {
-    "class" : "org.apache.solr.ltr.norm.StandardNormalizer",
-    "params" : { "avg":"42", "std":"6" }
-}
-</pre>
- * <p>
- * Example normalizations:
+ *
+ * <p>Example configuration:
+ *
+ * <pre>

Review comment:
       note to self: check this is still rendered correctly




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@solr.apache.org
For additional commands, e-mail: issues-h...@solr.apache.org

Reply via email to