sonatype-lift[bot] commented on code in PR #1458:
URL: https://github.com/apache/solr/pull/1458#discussion_r1146943436


##########
solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java:
##########
@@ -0,0 +1,358 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import org.apache.lucene.store.Directory;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.CoreDescriptor;
+import org.apache.solr.core.DirectoryFactory;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.backup.repository.BackupRepository;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for testing the "Install Shard API" with various backup repositories.
+ *
+ * <p>Subclasses are expected to bootstrap a Solr cluster with a single configured backup
+ * repository. This base class will populate that backup repository with all data necessary for
+ * these tests.
+ *
+ * @see org.apache.solr.handler.admin.api.InstallShardDataAPI
+ */
+public abstract class AbstractInstallShardTest extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  protected static final String INSTALL_DATA_BASE_LOCATION = "/";
+  protected static final String BACKUP_REPO_NAME = "trackingBackupRepository";
+
+  private static long docsSeed; // see indexDocs()
+
+  @BeforeClass
+  public static void seedDocGenerator() {
+    docsSeed = random().nextLong();
+    System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
+  }
+
+  // Populated by 'bootstrapBackupRepositoryData'
+  private static int singleShardNumDocs = -1;
+  private static int replicasPerShard = -1;
+  private static int multiShardNumDocs = -1;
+  private static URI singleShard1Uri = null;
+  private static URI[] multiShardUris = null;
+
+  public static void bootstrapBackupRepositoryData(String baseRepositoryLocation) throws Exception {
+    final int numShards = random().nextInt(3) + 2;
+    multiShardUris = new URI[numShards];
+    replicasPerShard = random().nextInt(3) + 1;
+    // replicasPerShard = 1;
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    // Create collections and index docs
+    final String singleShardCollName = createAndAwaitEmptyCollection(1, replicasPerShard);
+    singleShardNumDocs = indexDocs(singleShardCollName, true);
+    assertCollectionHasNumDocs(singleShardCollName, singleShardNumDocs);
+    final String multiShardCollName = createAndAwaitEmptyCollection(numShards, replicasPerShard);
+    multiShardNumDocs = indexDocs(multiShardCollName, true);
+    assertCollectionHasNumDocs(multiShardCollName, multiShardNumDocs);
+
+    // Upload shard data to BackupRepository - single shard collection
+    singleShard1Uri =
+        createBackupRepoDirectoryForShardData(
+            baseRepositoryLocation, singleShardCollName, "shard1");
+    copyShardDataToBackupRepository(singleShardCollName, "shard1", singleShard1Uri);
+    // Upload shard data to BackupRepository - multi-shard collection
+    for (int i = 0; i < multiShardUris.length; i++) {
+      final String shardName = "shard" + (i + 1);
+      multiShardUris[i] =
+          createBackupRepoDirectoryForShardData(
+              baseRepositoryLocation, multiShardCollName, shardName);
+      copyShardDataToBackupRepository(multiShardCollName, shardName, multiShardUris[i]);
+    }
+
+    // Nuke collections now that we've populated the BackupRepository
+    CollectionAdminRequest.deleteCollection(singleShardCollName).process(solrClient);
+    CollectionAdminRequest.deleteCollection(multiShardCollName).process(solrClient);
+  }
+
+  @Test
+  public void testInstallFailsIfCollectionIsNotInReadOnlyMode() throws Exception {
+    final String collectionName = createAndAwaitEmptyCollection(1, replicasPerShard);
+
+    final String singleShardLocation = singleShard1Uri.toString();
+    final BaseHttpSolrClient.RemoteSolrException rse =
+        expectThrows(
+            BaseHttpSolrClient.RemoteSolrException.class,
+            () -> {
+              CollectionAdminRequest.installDataToShard(
+                      collectionName, "shard1", singleShardLocation, BACKUP_REPO_NAME)
+                  .process(cluster.getSolrClient());
+            });
+    assertEquals(400, rse.code());
+    assertTrue(rse.getMessage().contains("Collection must be in readOnly mode"));
+
+    // Shard-install has failed so collection should still be empty.
+    assertCollectionHasNumDocs(collectionName, 0);
+  }
+
+  @Test
+  public void testInstallToSingleShardCollection() throws Exception {
+    final String collectionName = createAndAwaitEmptyCollection(1, replicasPerShard);
+    enableReadOnly(collectionName);
+
+    final String singleShardLocation = singleShard1Uri.toString();
+    CollectionAdminRequest.installDataToShard(
+            collectionName, "shard1", singleShardLocation, BACKUP_REPO_NAME)
+        .process(cluster.getSolrClient());
+
+    // Shard-install has succeeded, so the collection should now contain the installed docs.
+    assertCollectionHasNumDocs(collectionName, singleShardNumDocs);
+  }
+
+  @Test
+  public void testSerialInstallToMultiShardCollection() throws Exception {
+    final String collectionName =
+        createAndAwaitEmptyCollection(multiShardUris.length, replicasPerShard);
+    enableReadOnly(collectionName);
+
+    for (int i = 1; i <= multiShardUris.length; i++) {
+      CollectionAdminRequest.installDataToShard(
+              collectionName, "shard" + i, multiShardUris[i - 1].toString(), BACKUP_REPO_NAME)
+          .process(cluster.getSolrClient());
+    }
+
+    assertCollectionHasNumDocs(collectionName, multiShardNumDocs);
+  }
+
+  @Test
+  public void testParallelInstallToMultiShardCollection() throws Exception {
+    final String collectionName =
+        createAndAwaitEmptyCollection(multiShardUris.length, replicasPerShard);
+    enableReadOnly(collectionName);
+
+    runParallelShardInstalls(collectionName, multiShardUris);
+
+    assertCollectionHasNumDocs(collectionName, multiShardNumDocs);
+  }
+
+  /**
+   * Builds a string representation of a valid solr.xml configuration, with the provided
+   * backup-repository configuration inserted
+   *
+   * @param backupRepositoryText a string representing the 'backup' XML tag to put in the
+   *     constructed solr.xml
+   */
+  public static String defaultSolrXmlTextWithBackupRepository(String backupRepositoryText) {
+    return "<solr>\n"
+        + "\n"
+        + "  <str name=\"shareSchema\">${shareSchema:false}</str>\n"
+        + "  <str 
name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n"
+        + "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n"
+        + "\n"
+        + "  <shardHandlerFactory name=\"shardHandlerFactory\" 
class=\"HttpShardHandlerFactory\">\n"
+        + "    <str name=\"urlScheme\">${urlScheme:}</str>\n"
+        + "    <int name=\"socketTimeout\">${socketTimeout:90000}</int>\n"
+        + "    <int name=\"connTimeout\">${connTimeout:15000}</int>\n"
+        + "  </shardHandlerFactory>\n"
+        + "\n"
+        + "  <solrcloud>\n"
+        + "    <str name=\"host\">127.0.0.1</str>\n"
+        + "    <int name=\"hostPort\">${hostPort:8983}</int>\n"
+        + "    <str name=\"hostContext\">${hostContext:solr}</str>\n"
+        + "    <int 
name=\"zkClientTimeout\">${solr.zkclienttimeout:30000}</int>\n"
+        + "    <bool 
name=\"genericCoreNodeNames\">${genericCoreNodeNames:true}</bool>\n"
+        + "    <int name=\"leaderVoteWait\">10000</int>\n"
+        + "    <int 
name=\"distribUpdateConnTimeout\">${distribUpdateConnTimeout:45000}</int>\n"
+        + "    <int 
name=\"distribUpdateSoTimeout\">${distribUpdateSoTimeout:340000}</int>\n"
+        + "  </solrcloud>\n"
+        + "  \n"
+        + backupRepositoryText
+        + "  \n"
+        + "</solr>\n";
+  }
+
+  private static void assertCollectionHasNumDocs(String collection, int expectedNumDocs)
+      throws Exception {
+    final SolrClient solrClient = cluster.getSolrClient();
+    assertEquals(
+        expectedNumDocs,
+        solrClient.query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+  }
+
+  private static void copyShardDataToBackupRepository(
+      String collectionName, String shardName, URI destinationUri) throws Exception {
+    final CoreContainer cc = cluster.getJettySolrRunner(0).getCoreContainer();
+    final Collection<String> coreNames = cc.getAllCoreNames();
+    final String coreName =
+        coreNames.stream()
+            .filter(name -> name.contains(collectionName) && name.contains(shardName))
+            .findFirst()
+            .get();
+    final CoreDescriptor cd = cc.getCoreDescriptor(coreName);
+    final Path coreInstanceDir = cd.getInstanceDir();
+    assert coreInstanceDir.toFile().exists();
+    assert coreInstanceDir.toFile().isDirectory();
+
+    final Path coreIndexDir = coreInstanceDir.resolve("data").resolve("index");
+    assert coreIndexDir.toFile().exists();
+    assert coreIndexDir.toFile().isDirectory();
+
+    try (final BackupRepository backupRepository = cc.newBackupRepository(BACKUP_REPO_NAME);
+        final SolrCore core = cc.getCore(coreName)) {
+      final Directory dir =
+          core.getDirectoryFactory()
+              .get(
+                  coreIndexDir.toString(),
+                  DirectoryFactory.DirContext.DEFAULT,
+                  core.getSolrConfig().indexConfig.lockType);
+      try {
+        for (final String dirContent : dir.listAll()) {
+          if (dirContent.contains("write.lock")) continue;
+          backupRepository.copyFileFrom(dir, dirContent, destinationUri);
+        }
+      } finally {
+        core.getDirectoryFactory().release(dir);
+      }
+    }
+  }
+
+  private static URI createBackupRepoDirectoryForShardData(
+      String baseLocation, String collectionName, String shardName) throws Exception {
+    final CoreContainer cc = cluster.getJettySolrRunner(0).getCoreContainer();
+    try (final BackupRepository backupRepository = cc.newBackupRepository(BACKUP_REPO_NAME)) {
+      final URI baseLocationUri = backupRepository.createURI(baseLocation);
+      final URI collectionLocation = backupRepository.resolve(baseLocationUri, collectionName);
+      backupRepository.createDirectory(collectionLocation);
+      final URI shardLocation = backupRepository.resolve(collectionLocation, shardName);
+      backupRepository.createDirectory(shardLocation);
+      return shardLocation;
+    }
+  }
+
+  private static int indexDocs(String collectionName, boolean useUUID) throws Exception {
+    Random random =

Review Comment:
   <picture><img alt="12% of developers fix this issue" 
src="https://lift.sonatype.com/api/commentimage/fixrate/12/display.svg";></picture>
   
   
   <b>[PREDICTABLE_RANDOM](https://find-sec-bugs.github.io/bugs.htm#PREDICTABLE_RANDOM):</b> This random generator (java.util.Random) is predictable
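   Worth noting for context (this note and the snippet below are editorial, not part of the Lift finding): the `Random` here is seeded from the test framework (`docsSeed`) so that generated test data stays reproducible, which is usually the intent in test code. The generic remedy only matters when the value must not be guessable, in which case `java.security.SecureRandom` is the usual drop-in. A minimal, hypothetical sketch of the difference (class and method names are illustrative):

```java
import java.security.SecureRandom;
import java.util.Random;

// Illustrative only; not code from the PR.
final class RandomnessSketch {
  // Predictable but reproducible: appropriate for deterministic test data.
  static int reproducibleDocCount(long seed) {
    return new Random(seed).nextInt(100) + 1;
  }

  // Unpredictable: use when the value is security-sensitive (tokens, salts, etc.).
  static int unpredictableValue() {
    return new SecureRandom().nextInt(100) + 1;
  }
}
```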
   
   ---
   
   <details><summary>ℹī¸ Expand to see all <b>@sonatype-lift</b> commands</summary>

   You can reply with the following commands. For example, reply with ***@sonatype-lift ignoreall*** to leave out all findings.
   | **Command** | **Usage** |
   | ------------- | ------------- |
   | `@sonatype-lift ignore` | Leave out the above finding from this PR |
   | `@sonatype-lift ignoreall` | Leave out all the existing findings from this PR |
   | `@sonatype-lift exclude <file\|issue\|path\|tool>` | Exclude specified `file\|issue\|path\|tool` from Lift findings by updating your config.toml file |

   **Note:** When talking to LiftBot, you need to **refresh** the page to see its response.
   <sub>[Click here](https://github.com/apps/sonatype-lift/installations/new) to add LiftBot to another repo.</sub></details>
   
   
   



##########
solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java:
##########
+  private static URI createBackupRepoDirectoryForShardData(
+      String baseLocation, String collectionName, String shardName) throws Exception {
+    final CoreContainer cc = cluster.getJettySolrRunner(0).getCoreContainer();
+    try (final BackupRepository backupRepository = cc.newBackupRepository(BACKUP_REPO_NAME)) {

Review Comment:
   <picture><img alt="15% of developers fix this issue" 
src="https://lift.sonatype.com/api/commentimage/fixrate/15/display.svg";></picture>
   
   <b>NULL_DEREFERENCE:</b> object `cc` last assigned on line 262 could be null and is dereferenced at line 263.

   ❗❗ <b>2 similar findings have been found in this PR</b>

   <details><summary>🔎 Expand here to view all instances of this finding</summary><br/>

   <div align="center">

   | **File Path** | **Line Number** |
   | ------------- | ------------- |
   | solr/core/src/java/org/apache/solr/handler/admin/api/InstallShardDataAPI.java | [100](https://github.com/apache/solr/blob/4fd67205b96c5c4b290b14c15c691b17fbff6495/solr/core/src/java/org/apache/solr/handler/admin/api/InstallShardDataAPI.java#L100) |
   | solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java | [226](https://github.com/apache/solr/blob/4fd67205b96c5c4b290b14c15c691b17fbff6495/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java#L226) |

   <p><a href="https://lift.sonatype.com/results/github.com/apache/solr/01GW86YY8J27H55NP8ZPK636RD?t=Infer|NULL_DEREFERENCE" target="_blank">Visit the Lift Web Console</a> to find more details in your report.</p></div></details>
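   For what it's worth (an editorial sketch, not the PR's code): findings like this are commonly satisfied by guarding the possibly-null `CoreContainer` before it is dereferenced, so a missing container fails fast with a clear message instead of an NPE. The helper name below is illustrative.

```java
import java.util.Objects;

import org.apache.solr.core.CoreContainer;

// Hypothetical helper: fail fast with a descriptive message if the container is absent.
final class CoreContainerGuard {
  static CoreContainer require(CoreContainer cc) {
    return Objects.requireNonNull(cc, "CoreContainer was not available on the first Jetty node");
  }
}
```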
   
   
   



##########
solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java:
##########
+  private static URI createBackupRepoDirectoryForShardData(
+      String baseLocation, String collectionName, String shardName) throws Exception {
+    final CoreContainer cc = cluster.getJettySolrRunner(0).getCoreContainer();
+    try (final BackupRepository backupRepository = cc.newBackupRepository(BACKUP_REPO_NAME)) {

Review Comment:
   <picture><img alt="5% of developers fix this issue" 
src="https://lift.sonatype.com/api/commentimage/fixrate/5/display.svg";></picture>
   
   <b>NULLPTR_DEREFERENCE:</b> `cc` could be null (last assigned on line 262) and is dereferenced.

   ❗❗ <b>2 similar findings have been found in this PR</b>

   <details><summary>🔎 Expand here to view all instances of this finding</summary><br/>

   <div align="center">

   | **File Path** | **Line Number** |
   | ------------- | ------------- |
   | solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java | [233](https://github.com/apache/solr/blob/4fd67205b96c5c4b290b14c15c691b17fbff6495/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java#L233) |
   | solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java | [226](https://github.com/apache/solr/blob/4fd67205b96c5c4b290b14c15c691b17fbff6495/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractInstallShardTest.java#L226) |

   <p><a href="https://lift.sonatype.com/results/github.com/apache/solr/01GW86YY8J27H55NP8ZPK636RD?t=Infer|NULLPTR_DEREFERENCE" target="_blank">Visit the Lift Web Console</a> to find more details in your report.</p></div></details>
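   Same root cause as the previous finding; since this is test code, a test-side guard is another option (again an editorial sketch with illustrative names, not code from the PR): assert the container is present before calling `newBackupRepository`, so a null surfaces as a clear JUnit failure rather than an NPE.

```java
import static org.junit.Assert.assertNotNull;

import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.backup.repository.BackupRepository;

// Hypothetical test helper: make the null case an explicit assertion failure.
final class BackupRepositorySketch {
  static BackupRepository open(CoreContainer cc, String repoName) throws Exception {
    assertNotNull("CoreContainer should be initialized before opening a backup repository", cc);
    return cc.newBackupRepository(repoName);
  }
}
```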
   
   
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@solr.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

