cpoerschke commented on a change in pull request #744:
URL: https://github.com/apache/solr/pull/744#discussion_r827855990
##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
##########
@@ -2225,9 +2225,7 @@ protected String getBaseUrl(HttpSolrClient client) {
   }
 
   public static SolrInputDocument getDoc(Object... fields) throws Exception {
-    SolrInputDocument doc = new SolrInputDocument();
-    addFields(doc, fields);
-    return doc;
+    return sdoc(fields);
   }

Review comment:
   Outside the scope of this PR, but perhaps long-term only one of `getDoc` and `sdoc` could be kept.

##########
File path: solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
##########
@@ -216,23 +218,27 @@ public void test() throws Exception {
     add(client0, seenLeader, sdoc("id", Integer.toString((int) v), "_version_", v));
     docsAdded.add(4000);
     int toAdd = numVersions + 10;
-    for (int i = 0; i < toAdd; i++) {
-      add(
-          client0,
-          seenLeader,
-          sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
-      add(
-          client1,
-          seenLeader,
-          sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
-      docsAdded.add((int) v + i + 1);
-    }
+    int offset = (int) v;
+    add(
+        client0,
+        seenLeader,
+        IntStream.range(0, toAdd)
+            .map(i -> i + offset + 1)
+            .mapToObj(i -> sdoc("id", Integer.toString(i), "_version_", i))
+            .collect(Collectors.toList()));

Review comment:
   Wondering if the logic could be simplified by moving the `+ offset` and `+ 1` into the range itself?
   ```suggestion
        IntStream.rangeClosed(1 + offset, toAdd + offset)
            .mapToObj(i -> sdoc("id", Integer.toString(i), "_version_", i))
            .collect(Collectors.toList()));
   ```
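   As a side note, here is a minimal standalone sketch (not part of the PR; the class name and the sample `offset`/`toAdd` values are made up for illustration) confirming that the suggested `rangeClosed` form visits the same values as the `range(...).map(...)` chain it would replace:

   ```java
   import java.util.Arrays;
   import java.util.stream.IntStream;

   public class RangeEquivalenceCheck {
     public static void main(String[] args) {
       // Arbitrary sample values standing in for the test's offset and toAdd.
       int offset = 4000;
       int toAdd = 110;

       // Form used in the PR: start at 0, then shift every value by offset + 1.
       int[] viaMap = IntStream.range(0, toAdd).map(i -> i + offset + 1).toArray();

       // Form from the suggestion: fold the shift into the range bounds.
       int[] viaRangeClosed = IntStream.rangeClosed(1 + offset, toAdd + offset).toArray();

       System.out.println(Arrays.equals(viaMap, viaRangeClosed)); // prints true
     }
   }
   ```

   Both forms enumerate `offset + 1` through `offset + toAdd` inclusive, so only the readability differs.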
##########
File path: solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
##########
@@ -569,31 +570,69 @@ protected void index(Object... fields) throws Exception {
     indexDoc(doc);
   }
 
+  private SolrClient clientFor(SolrInputDocument doc) {
+    return clients.get((doc.getField(id).toString().hashCode() & 0x7fffffff) % clients.size());
+  }
+
   /** Indexes the document in both the control client, and a randomly selected client */
   protected void indexDoc(SolrInputDocument doc) throws IOException, SolrServerException {
+    indexDoc(clientFor(doc), null, doc);
+  }
+
+  protected void indexDoc(SolrClient client, SolrParams params, SolrInputDocument doc)
+      throws IOException, SolrServerException {
     controlClient.add(doc);
     if (shardCount == 0) { // mostly for temp debugging
       return;
     }
-    int which = (doc.getField(id).toString().hashCode() & 0x7fffffff) % clients.size();
-    SolrClient client = clients.get(which);
     client.add(doc);
   }
 
+  /**
+   * Indexes the stream of documents in both the control client and randomly selected clients (per
+   * batch)
+   */
+  public void indexDocs(Iterator<SolrInputDocument> docs) throws SolrServerException, IOException {
+    int batchSize = 100; // TODO make configurable?
+    List<SolrInputDocument> batch = new ArrayList<>(batchSize);
+    while (docs.hasNext()) {
+      batch.add(docs.next());
+      if (batch.size() == batchSize) {
+        indexDocs(clientFor(batch.get(0)), null, batch);
+        batch.clear();
+      }
+    }
+    if (batch.size() > 0) {

Review comment:
   ```suggestion
       if (!batch.isEmpty()) {
   ```

##########
File path: solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
##########
@@ -216,23 +218,27 @@ public void test() throws Exception {
     add(client0, seenLeader, sdoc("id", Integer.toString((int) v), "_version_", v));
     docsAdded.add(4000);
    int toAdd = numVersions + 10;
-    for (int i = 0; i < toAdd; i++) {
-      add(
-          client0,
-          seenLeader,
-          sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
-      add(
-          client1,
-          seenLeader,
-          sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
-      docsAdded.add((int) v + i + 1);
-    }
+    int offset = (int) v;
+    add(
+        client0,
+        seenLeader,
+        IntStream.range(0, toAdd)
+            .map(i -> i + offset + 1)
+            .mapToObj(i -> sdoc("id", Integer.toString(i), "_version_", i))
+            .collect(Collectors.toList()));
+    add(
+        client1,
+        seenLeader,
+        IntStream.range(0, toAdd)
+            .map(i -> i + offset + 1)
+            .mapToObj(i -> sdoc("id", Integer.toString(i), "_version_", i))
+            .collect(Collectors.toList()));

Review comment:
   ```suggestion
        IntStream.rangeClosed(1 + offset, toAdd + offset)
            .mapToObj(i -> sdoc("id", Integer.toString(i), "_version_", i))
            .collect(Collectors.toList()));
   ```

##########
File path: solr/core/src/test/org/apache/solr/handler/component/TestDistributedStatsComponentCardinality.java
##########
@@ -77,27 +81,32 @@ public void buildIndex() throws Exception {
     log.info("Building an index of {} docs", NUM_DOCS);
     // we want a big spread in the long values we use, decrement by BIG_PRIME as we index
-    long longValue = MAX_LONG;
-
-    for (int i = 1; i <= NUM_DOCS; i++) {
-      // with these values, we know that every doc indexed has a unique value in all of the
-      // fields we will compute cardinality against.
-      // which means the number of docs matching a query is the true cardinality for each field
-
-      final String strValue = "s" + longValue;
-      indexDoc(
-          sdoc(
-              "id", "" + i,
-              "int_i", "" + i,
-              "int_i_prehashed_l", "" + HASHER.hashInt(i).asLong(),
-              "long_l", "" + longValue,
-              "long_l_prehashed_l", "" + HASHER.hashLong(longValue).asLong(),
-              "string_s", strValue,
-              "string_s_prehashed_l",
-              "" + HASHER.hashString(strValue, StandardCharsets.UTF_8).asLong()));
-
-      longValue -= BIG_PRIME;
-    }
+    final AtomicLong longValue = new AtomicLong(MAX_LONG);
+
+    Iterator<SolrInputDocument> docs =
+        IntStream.range(1, NUM_DOCS + 1)

Review comment:
   Would `rangeClosed` be equivalent but clearer?
   ```suggestion
        IntStream.rangeClosed(1, NUM_DOCS)
   ```
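   Relatedly, a minimal standalone sketch (not the PR's code; the class name, the `NUM_DOCS` value, and the field names are illustrative, and a `println` stands in for the per-batch client call) of the drain-in-batches pattern behind the new `indexDocs(Iterator<SolrInputDocument>)` helper, using the `rangeClosed` form to build the documents:

   ```java
   import java.util.ArrayList;
   import java.util.Iterator;
   import java.util.List;
   import java.util.stream.IntStream;
   import org.apache.solr.common.SolrInputDocument;

   public class BatchedIndexingSketch {
     public static void main(String[] args) {
       final int NUM_DOCS = 250; // illustrative document count

       // Build an iterator of documents lazily, mirroring the IntStream-based doc generation.
       Iterator<SolrInputDocument> docs =
           IntStream.rangeClosed(1, NUM_DOCS)
               .mapToObj(
                   i -> {
                     SolrInputDocument doc = new SolrInputDocument();
                     doc.addField("id", Integer.toString(i));
                     doc.addField("int_i", i);
                     return doc;
                   })
               .iterator();

       // Drain the iterator in batches, as the new indexDocs helper does.
       int batchSize = 100;
       List<SolrInputDocument> batch = new ArrayList<>(batchSize);
       while (docs.hasNext()) {
         batch.add(docs.next());
         if (batch.size() == batchSize) {
           System.out.println("indexing batch of " + batch.size()); // stand-in for client.add(batch)
           batch.clear();
         }
       }
       if (!batch.isEmpty()) {
         System.out.println("indexing final batch of " + batch.size());
       }
     }
   }
   ```

   With 250 documents and a batch size of 100 this drains two full batches and one final batch of 50, mirroring how the helper groups documents before picking a client per batch.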