This is an automated email from the ASF dual-hosted git repository.

dcapwell pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 9a534ba06f14729c08eb55b90878ef32d5423782
Merge: f0cb7f1c1a 4091613298
Author: David Capwell <[email protected]>
AuthorDate: Fri Nov 7 11:52:26 2025 -0800

    Merge branch 'cassandra-5.0' into trunk

 CHANGES.txt                                        |  1 +
 .../db/compaction/LeveledCompactionStrategy.java   | 28 +++++++++++++++++++---
 .../compaction/LeveledCompactionStrategyTest.java  | 18 ++++++++++++++
 .../schema/CreateTableValidationTest.java          |  7 ++++++
 .../cassandra/utils/CassandraGenerators.java       |  9 ++++---
 5 files changed, 57 insertions(+), 6 deletions(-)

diff --cc CHANGES.txt
index e4ed0d362f,688430f674..b4996c26ba
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -325,19 -96,7 +325,20 @@@ Merged from 4.1
   * Optionally skip exception logging on invalid legacy protocol magic 
exception (CASSANDRA-19483)
   * Fix SimpleClient ability to release acquired capacity (CASSANDRA-20202)
   * Fix WaitQueue.Signal.awaitUninterruptibly may block forever if invoking 
thread is interrupted (CASSANDRA-20084)
 + * Run audit_logging_options through sanitation and validation on startup 
(CASSANDRA-20208)
 + * Enforce CQL message size limit on multiframe messages (CASSANDRA-20052)
 + * Fix race condition in DecayingEstimatedHistogramReservoir during rescale 
(CASSANDRA-19365)
  Merged from 4.0:
++ * Leveled Compaction doesn't validate maxBytesForLevel when the table is 
altered/created (CASSANDRA-20570)
 + * Updated dtest-api to 0.0.18 and removed JMX-related classes that now live 
in the dtest-api (CASSANDRA-20884)
 + * Fixed incorrect error message constant for keyspace name length validation 
(CASSANDRA-20915)
 + * Prevent too long table names not fitting file names (CASSANDRA-20389)
 + * Update Jackson to 2.19.2 (CASSANDRA-20848)
 + * Update commons-lang3 to 3.18.0 (CASSANDRA-20849)
 + * Add NativeTransportMaxConcurrentConnectionsPerIp to StorageProxyMBean 
(CASSANDRA-20642)
 + * Make secondary index implementations notified about rows in fully expired 
SSTables in compaction (CASSANDRA-20829)
 + * Ensure prepared_statement INSERT timestamp precedes eviction DELETE 
(CASSANDRA-19703)
 + * Gossip doesn't converge due to race condition when updating EndpointStates 
multiple fields (CASSANDRA-20659)
   * Handle sstable metadata stats file getting a new mtime after compaction 
has finished (CASSANDRA-18119)
   * Honor MAX_PARALLEL_TRANSFERS correctly (CASSANDRA-20532)
   * Updating a column with a new TTL but same expiration time is 
non-deterministic and causes repair mismatches. (CASSANDRA-20561)
diff --cc test/unit/org/apache/cassandra/utils/CassandraGenerators.java
index b428e9acb9,b064ec98fc..edfbe88163
--- a/test/unit/org/apache/cassandra/utils/CassandraGenerators.java
+++ b/test/unit/org/apache/cassandra/utils/CassandraGenerators.java
@@@ -48,20 -38,6 +48,19 @@@ import javax.annotation.Nullable
  
  import com.google.common.collect.ImmutableList;
  import com.google.common.collect.ImmutableMap;
 +import com.google.common.collect.ImmutableSet;
 +import com.google.common.collect.Sets;
 +
 +import accord.utils.SortedArrays.SortedArrayList;
- import org.apache.cassandra.db.compaction.LeveledManifest;
 +import org.apache.cassandra.schema.*;
 +import 
org.apache.cassandra.service.consensus.migration.ConsensusMigrationState;
 +import org.apache.cassandra.tcm.extensions.ExtensionKey;
 +import org.apache.cassandra.tcm.extensions.ExtensionValue;
 +import org.apache.cassandra.tcm.membership.Directory;
 +import org.apache.cassandra.tcm.ownership.DataPlacements;
 +import org.apache.cassandra.tcm.ownership.TokenMap;
 +import org.apache.cassandra.tcm.sequences.InProgressSequences;
 +import org.apache.cassandra.tcm.sequences.LockedRanges;
  import org.apache.commons.lang3.builder.MultilineRecursiveToStringStyle;
  import org.apache.commons.lang3.builder.ReflectionToStringBuilder;
  
@@@ -142,7 -92,6 +141,8 @@@ import static org.apache.cassandra.util
  import static org.apache.cassandra.utils.Generators.SMALL_TIME_SPAN_NANOS;
  import static org.apache.cassandra.utils.Generators.TIMESTAMP_NANOS;
  import static org.apache.cassandra.utils.Generators.TINY_TIME_SPAN_NANOS;
 +import static org.apache.cassandra.utils.Generators.directAndHeapBytes;
++import static org.junit.Assert.assertTrue;
  
  public final class CassandraGenerators
  {
@@@ -305,630 -185,6 +305,633 @@@
          return new TableMetadataBuilder().withKeyspaceName(ks).build(rnd);
      }
  
 +    public static Gen<String> sstableFormatNames()
 +    {
 +        return SourceDSL.arbitrary().pick("big", "bti");
 +    }
 +
 +    public static Gen<SSTableFormat<?, ?>> sstableFormat()
 +    {
 +        // make sure ordering is deterministic, else repeatability breaks
 +        NavigableMap<String, SSTableFormat<?, ?>> formats = new 
TreeMap<>(DatabaseDescriptor.getSSTableFormats());
 +        return SourceDSL.arbitrary().pick(new ArrayList<>(formats.values()));
 +    }
 +
 +    public static class AbstractReplicationStrategyBuilder
 +    {
 +        public enum Strategy
 +        {
 +            Simple(true),
 +            NetworkTopology(true),
 +            Local(false),
 +            Meta(false);
 +
 +            public final boolean userAllowed;
 +
 +            Strategy(boolean userAllowed)
 +            {
 +                this.userAllowed = userAllowed;
 +            }
 +        }
 +
 +        private Gen<Strategy> strategyGen = 
SourceDSL.arbitrary().enumValues(Strategy.class);
 +        private Gen<String> keyspaceNameGen = KEYSPACE_NAME_GEN;
 +        private Gen<Integer> rfGen = SourceDSL.integers().between(1, 3);
 +        private Gen<List<String>> networkTopologyDCGen = rs -> {
 +            Gen<Integer> numDcsGen = SourceDSL.integers().between(1, 3);
 +            Gen<String> nameGen = IDENTIFIER_GEN;
 +            Set<String> dcs = new HashSet<>();
 +            int targetSize = numDcsGen.generate(rs);
 +            while (dcs.size() != targetSize)
 +                dcs.add(nameGen.generate(rs));
 +            List<String> ordered = new ArrayList<>(dcs);
 +            ordered.sort(Comparator.naturalOrder());
 +            return ordered;
 +        };
 +
 +        public AbstractReplicationStrategyBuilder withKeyspace(Gen<String> 
keyspaceNameGen)
 +        {
 +            this.keyspaceNameGen = keyspaceNameGen;
 +            return this;
 +        }
 +
 +        public AbstractReplicationStrategyBuilder withKeyspace(String 
keyspace)
 +        {
 +            this.keyspaceNameGen = i -> keyspace;
 +            return this;
 +        }
 +
 +        public AbstractReplicationStrategyBuilder withUserAllowed()
 +        {
 +            List<Strategy> allowed = Stream.of(Strategy.values()).filter(s -> 
s.userAllowed).collect(Collectors.toList());
 +            strategyGen = SourceDSL.arbitrary().pick(allowed);
 +            return this;
 +        }
 +
 +        public AbstractReplicationStrategyBuilder withRf(Gen<Integer> rfGen)
 +        {
 +            this.rfGen = rfGen;
 +            return this;
 +        }
 +
 +        public AbstractReplicationStrategyBuilder withRf(int rf)
 +        {
 +            this.rfGen = i -> rf;
 +            return this;
 +        }
 +
 +        public AbstractReplicationStrategyBuilder 
withDatacenters(Gen<List<String>> networkTopologyDCGen)
 +        {
 +            this.networkTopologyDCGen = networkTopologyDCGen;
 +            return this;
 +        }
 +
 +        public AbstractReplicationStrategyBuilder withDatacenters(String 
first, String... rest)
 +        {
 +            if (rest.length == 0)
 +            {
 +                this.networkTopologyDCGen = i -> 
Collections.singletonList(first);
 +            }
 +            else
 +            {
 +                List<String> all = new ArrayList<>(rest.length + 1);
 +                all.add(first);
 +                all.addAll(Arrays.asList(rest));
 +                this.networkTopologyDCGen = i -> all;
 +            }
 +            return this;
 +        }
 +
 +        public Gen<AbstractReplicationStrategy> build()
 +        {
 +            return rs -> {
 +                Strategy strategy = strategyGen.generate(rs);
 +                switch (strategy)
 +                {
 +                    case Simple:
 +                        return new 
SimpleStrategy(keyspaceNameGen.generate(rs),
 +                                                  
ImmutableMap.of(SimpleStrategy.REPLICATION_FACTOR, 
rfGen.generate(rs).toString()));
 +                    case NetworkTopology:
 +                        ImmutableMap.Builder<String, String> builder = 
ImmutableMap.builder();
 +                        List<String> names = 
networkTopologyDCGen.generate(rs);
 +                        for (String name : names)
 +                            builder.put(name, rfGen.generate(rs).toString());
 +                        ImmutableMap<String, String> map = builder.build();
 +                        return new 
TestableNetworkTopologyStrategy(keyspaceNameGen.generate(rs), map);
 +                    case Meta:
 +                        return new MetaStrategy(keyspaceNameGen.generate(rs), 
ImmutableMap.of());
 +                    case Local:
 +                        return new 
LocalStrategy(keyspaceNameGen.generate(rs), ImmutableMap.of());
 +                    default:
 +                        throw new 
UnsupportedOperationException(strategy.name());
 +                }
 +            };
 +        }
 +    }
 +
 +    public static class TestableNetworkTopologyStrategy extends 
NetworkTopologyStrategy
 +    {
 +        public TestableNetworkTopologyStrategy(String keyspaceName, 
Map<String, String> configOptions) throws ConfigurationException
 +        {
 +            super(keyspaceName, configOptions);
 +        }
 +
 +        @Override
 +        public Collection<String> recognizedOptions(ClusterMetadata metadata)
 +        {
 +            return configOptions.keySet();
 +        }
 +    }
 +
 +    public static KeyspaceMetadataBuilder regularKeyspace()
 +    {
 +        return new 
KeyspaceMetadataBuilder().withKind(KeyspaceMetadata.Kind.REGULAR);
 +    }
 +
 +    public static class KeyspaceMetadataBuilder
 +    {
 +        private Gen<String> nameGen = KEYSPACE_NAME_GEN;
 +        private Gen<KeyspaceMetadata.Kind> kindGen = 
SourceDSL.arbitrary().enumValues(KeyspaceMetadata.Kind.class);
 +        private Gen<AbstractReplicationStrategyBuilder> replicationGen = i -> 
new AbstractReplicationStrategyBuilder();
 +        private Gen<Boolean> durableWritesGen = SourceDSL.booleans().all();
 +
 +        public KeyspaceMetadataBuilder 
withReplication(Gen<AbstractReplicationStrategyBuilder> replicationGen)
 +        {
 +            this.replicationGen = replicationGen;
 +            return this;
 +        }
 +
 +        public KeyspaceMetadataBuilder 
withReplication(AbstractReplicationStrategyBuilder replication)
 +        {
 +            this.replicationGen = i -> replication;
 +            return this;
 +        }
 +
 +        public KeyspaceMetadataBuilder withName(Gen<String> nameGen)
 +        {
 +            this.nameGen = nameGen;
 +            return this;
 +        }
 +
 +        public KeyspaceMetadataBuilder withName(String name)
 +        {
 +            this.nameGen = i -> name;
 +            return this;
 +        }
 +
 +        public KeyspaceMetadataBuilder withKind(Gen<KeyspaceMetadata.Kind> 
kindGen)
 +        {
 +            this.kindGen = kindGen;
 +            return this;
 +        }
 +
 +        public KeyspaceMetadataBuilder withKind(KeyspaceMetadata.Kind kind)
 +        {
 +            this.kindGen = i -> kind;
 +            return this;
 +        }
 +
 +        public Gen<KeyspaceMetadata> build()
 +        {
 +            return rs -> {
 +                String name = nameGen.generate(rs);
 +                KeyspaceMetadata.Kind kind = kindGen.generate(rs);
 +                AbstractReplicationStrategy replication = 
replicationGen.generate(rs).withKeyspace(nameGen).build().generate(rs);
 +                ReplicationParams replicationParams = 
ReplicationParams.fromStrategy(replication);
 +                boolean durableWrites = durableWritesGen.generate(rs);
 +                KeyspaceParams params = new KeyspaceParams(durableWrites, 
replicationParams, FastPathStrategy.simple());
 +                Tables tables = Tables.none();
 +                Views views = Views.none();
 +                Types types = Types.none();
 +                UserFunctions userFunctions = UserFunctions.none();
 +                return KeyspaceMetadata.createUnsafe(name, kind, params, 
tables, views, types, userFunctions);
 +            };
 +        }
 +    }
 +
 +    public static Gen<CachingParams> cachingParamsGen()
 +    {
 +        return rnd -> {
 +            boolean cacheKeys = nextBoolean(rnd);
 +            int rowsPerPartitionToCache;
 +            switch (SourceDSL.integers().between(1, 3).generate(rnd))
 +            {
 +                case 1: // ALL
 +                    rowsPerPartitionToCache = Integer.MAX_VALUE;
 +                    break;
 +                case 2: // NONE
 +                    rowsPerPartitionToCache = 0;
 +                    break;
 +                case 3: // num values
 +                    rowsPerPartitionToCache = 
Math.toIntExact(rnd.next(Constraint.between(1, Integer.MAX_VALUE - 1)));
 +                    break;
 +                default:
 +                    throw new AssertionError();
 +            }
 +            return new CachingParams(cacheKeys, rowsPerPartitionToCache);
 +        };
 +    }
 +
 +    public enum KnownCompactionAlgo
 +    {
 +        SizeTiered(SizeTieredCompactionStrategy.class),
 +        Leveled(LeveledCompactionStrategy.class),
 +        Unified(UnifiedCompactionStrategy.class);
 +        private final Class<? extends AbstractCompactionStrategy> klass;
 +
 +        KnownCompactionAlgo(Class<? extends AbstractCompactionStrategy> klass)
 +        {
 +            this.klass = klass;
 +        }
 +    }
 +
 +    public static class CompactionParamsBuilder
 +    {
 +        private Gen<KnownCompactionAlgo> algoGen = 
SourceDSL.arbitrary().enumValues(KnownCompactionAlgo.class);
 +        private Gen<CompactionParams.TombstoneOption> tombstoneOptionGen = 
SourceDSL.arbitrary().enumValues(CompactionParams.TombstoneOption.class);
 +        private Gen<Map<String, String>> sizeTieredOptions = rnd -> {
 +            if (nextBoolean(rnd)) return Map.of();
 +            Map<String, String> options = new HashMap<>();
 +            if (nextBoolean(rnd))
 +                // computes mb then converts to bytes
 +                
options.put(SizeTieredCompactionStrategyOptions.MIN_SSTABLE_SIZE_KEY, 
Long.toString(SourceDSL.longs().between(1, 100).generate(rnd) * 1024L * 1024L));
 +            if (nextBoolean(rnd))
 +                
options.put(SizeTieredCompactionStrategyOptions.BUCKET_LOW_KEY, 
Double.toString(SourceDSL.doubles().between(0.1, 0.9).generate(rnd)));
 +            if (nextBoolean(rnd))
 +                
options.put(SizeTieredCompactionStrategyOptions.BUCKET_HIGH_KEY, 
Double.toString(SourceDSL.doubles().between(1.1, 1.9).generate(rnd)));
 +            return options;
 +        };
 +        private Gen<Map<String, String>> leveledOptions = rnd -> {
 +            if (nextBoolean(rnd)) return Map.of();
 +            Map<String, String> options = new HashMap<>();
 +            if (nextBoolean(rnd))
 +                options.putAll(sizeTieredOptions.generate(rnd));
 +            int maxSSTableSizeInMB = 
LeveledCompactionStrategy.DEFAULT_MAX_SSTABLE_SIZE_MIB;
 +            if (nextBoolean(rnd))
 +            {
 +                // size in mb
 +                maxSSTableSizeInMB = SourceDSL.integers().between(1, 
2_000).generate(rnd);
 +                options.put(LeveledCompactionStrategy.SSTABLE_SIZE_OPTION, 
Integer.toString(maxSSTableSizeInMB));
 +            }
 +            if (nextBoolean(rnd))
 +            {
 +                // there is a relationship between sstable size and fanout, 
so respect it
 +                // see CASSANDRA-20570: Leveled Compaction doesn't validate 
maxBytesForLevel when the table is altered/created
 +                long maxSSTableSizeInBytes = maxSSTableSizeInMB * 1024L * 
1024L;
 +                Gen<Integer> gen = SourceDSL.integers().between(1, 100);
 +                Integer value = gen.generate(rnd);
 +                while (true)
 +                {
 +                    try
 +                    {
 +                        // see 
org.apache.cassandra.db.compaction.LeveledGenerations.MAX_LEVEL_COUNT for why 8 
is hard coded here
-                         LeveledManifest.maxBytesForLevel(8, value, 
maxSSTableSizeInBytes);
++                        // LeveledManifest.maxBytesForLevel(8, value, 
maxSSTableSizeInBytes);
++                        
options.put(LeveledCompactionStrategy.LEVEL_FANOUT_SIZE_OPTION, 
value.toString());
++                        LeveledCompactionStrategy.validateOptions(options);
 +                        break; // value is good, keep it
 +                    }
-                     catch (RuntimeException e)
++                    catch (ConfigurationException e)
 +                    {
++                        assertTrue(e.getMessage().contains("your 
maxSSTableSize must be absurdly high to compute"));
 +                        // this value is too large... let's shrink it
 +                        if (value.intValue() == 1)
 +                            throw new AssertionError("There is no possible 
fanout size that works with maxSSTableSizeInMB=" + maxSSTableSizeInMB);
 +                        gen = SourceDSL.integers().between(1, value - 1);
 +                        value = gen.generate(rnd);
 +                    }
 +                }
 +                
options.put(LeveledCompactionStrategy.LEVEL_FANOUT_SIZE_OPTION, 
value.toString());
 +            }
 +            if (nextBoolean(rnd))
 +                
options.put(LeveledCompactionStrategy.SINGLE_SSTABLE_UPLEVEL_OPTION, 
nextBoolean(rnd).toString());
 +            return options;
 +        };
 +        private Gen<Map<String, String>> unifiedOptions = rnd -> {
 +            if (nextBoolean(rnd)) return Map.of();
 +            Gen<String> storageSizeGen = 
Generators.filter(humanReadableStorageSimple(), s -> 
Controller.MIN_TARGET_SSTABLE_SIZE <= FBUtilities.parseHumanReadableBytes(s));
 +            Map<String, String> options = new HashMap<>();
 +            if (nextBoolean(rnd))
 +                options.put(Controller.BASE_SHARD_COUNT_OPTION, 
SourceDSL.integers().between(1, 10).generate(rnd).toString());
 +            if (nextBoolean(rnd))
 +                options.put(Controller.FLUSH_SIZE_OVERRIDE_OPTION, 
storageSizeGen.generate(rnd));
 +            if (nextBoolean(rnd))
 +                options.put(Controller.MAX_SSTABLES_TO_COMPACT_OPTION, 
SourceDSL.integers().between(0, 32).generate(rnd).toString());
 +            if (nextBoolean(rnd))
 +                options.put(Controller.SSTABLE_GROWTH_OPTION, 
SourceDSL.integers().between(0, 100).generate(rnd) + "%");
 +            if (nextBoolean(rnd))
 +                options.put(Controller.OVERLAP_INCLUSION_METHOD_OPTION, 
SourceDSL.arbitrary().enumValues(Overlaps.InclusionMethod.class).generate(rnd).name());
 +            if (nextBoolean(rnd))
 +            {
 +                int numLevels = SourceDSL.integers().between(1, 
10).generate(rnd);
 +                String[] scalingParams = new String[numLevels];
 +                Gen<Integer> levelSize = SourceDSL.integers().between(2, 10);
 +                for (int i = 0; i < numLevels; i++)
 +                {
 +                    String value;
 +                    switch (SourceDSL.integers().between(0, 3).generate(rnd))
 +                    {
 +                        case 0:
 +                            value = "N";
 +                            break;
 +                        case 1:
 +                            value = "L" + levelSize.generate(rnd);
 +                            break;
 +                        case 2:
 +                            value = "T" + levelSize.generate(rnd);
 +                            break;
 +                        case 3:
 +                            value = 
SourceDSL.integers().all().generate(rnd).toString();
 +                            break;
 +                        default:
 +                            throw new AssertionError();
 +                    }
 +                    scalingParams[i] = value;
 +                }
 +                options.put(Controller.SCALING_PARAMETERS_OPTION, 
String.join(",", scalingParams));
 +            }
 +            if (nextBoolean(rnd))
 +            {
 +                // Calculate TARGET then compute the MIN from that.  The 
issue is that there is a hidden relationship
 +                // between these 2 fields more complex than simple 
comparability, MIN must be < 70% * TARGET!
 +                // See CASSANDRA-20398
 +                // 1MiB to 128MiB target
 +                long targetBytes = SourceDSL.longs().between(1L << 20, 1L << 
27).generate(rnd);
 +                long limit = (long) Math.ceil(targetBytes * Math.sqrt(0.5));
 +                long minBytes = SourceDSL.longs().between(1, limit - 
1).generate(rnd);
 +                options.put(Controller.MIN_SSTABLE_SIZE_OPTION, minBytes + 
"B");
 +                options.put(Controller.TARGET_SSTABLE_SIZE_OPTION, 
targetBytes + "B");
 +            }
 +            return options;
 +        };
 +        //TODO (coverage): doesn't look to validate > 1, what does that even 
mean?
 +        private Gen<Float> tombstoneThreshold = SourceDSL.floats().between(0, 
1);
 +        private Gen<Boolean> uncheckedTombstoneCompaction = 
SourceDSL.booleans().all();
 +        private Gen<Boolean> onlyPurgeRepairedTombstones = 
SourceDSL.booleans().all();
 +
 +        public Gen<CompactionParams> build()
 +        {
 +            return rnd -> {
 +                KnownCompactionAlgo algo = algoGen.generate(rnd);
 +                Map<String, String> options = new HashMap<>();
 +                if (nextBoolean(rnd))
 +                    
options.put(CompactionParams.Option.PROVIDE_OVERLAPPING_TOMBSTONES.toString(), 
tombstoneOptionGen.generate(rnd).name());
 +                if (CompactionParams.supportsThresholdParams(algo.klass) && 
nextBoolean(rnd))
 +                {
 +                    
options.put(CompactionParams.Option.MIN_THRESHOLD.toString(), 
Long.toString(rnd.next(Constraint.between(2, 4))));
 +                    
options.put(CompactionParams.Option.MAX_THRESHOLD.toString(), 
Long.toString(rnd.next(Constraint.between(5, 32))));
 +                }
 +                if (nextBoolean(rnd))
 +                    
options.put(AbstractCompactionStrategy.TOMBSTONE_THRESHOLD_OPTION, 
tombstoneThreshold.generate(rnd).toString());
 +                if (nextBoolean(rnd))
 +                    
options.put(AbstractCompactionStrategy.UNCHECKED_TOMBSTONE_COMPACTION_OPTION, 
uncheckedTombstoneCompaction.generate(rnd).toString());
 +                if (nextBoolean(rnd))
 +                    
options.put(AbstractCompactionStrategy.ONLY_PURGE_REPAIRED_TOMBSTONES, 
onlyPurgeRepairedTombstones.generate(rnd).toString());
 +                switch (algo)
 +                {
 +                    case SizeTiered:
 +                        options.putAll(sizeTieredOptions.generate(rnd));
 +                        break;
 +                    case Leveled:
 +                        options.putAll(leveledOptions.generate(rnd));
 +                        break;
 +                    case Unified:
 +                        options.putAll(unifiedOptions.generate(rnd));
 +                        break;
 +                    default:
 +                        throw new UnsupportedOperationException(algo.name());
 +                }
 +                return CompactionParams.create(algo.klass, options);
 +            };
 +        }
 +    }
 +
 +    private static Boolean nextBoolean(RandomnessSource rnd)
 +    {
 +        return SourceDSL.booleans().all().generate(rnd);
 +    }
 +
 +    public static Gen<CompactionParams> compactionParamsGen()
 +    {
 +        return new CompactionParamsBuilder().build();
 +    }
 +
 +    public enum KnownCompressionAlgo
 +    {
 +        snappy("SnappyCompressor"),
 +        deflate("DeflateCompressor"),
 +        lz4("LZ4Compressor"),
 +        zstd("ZstdCompressor"),
 +        noop("NoopCompressor");
 +
 +        private final String compressor;
 +
 +        KnownCompressionAlgo(String compressor)
 +        {
 +            this.compressor = compressor;
 +        }
 +    }
 +
 +    public static class CompressionParamsBuilder
 +    {
 +        private Gen<Boolean> enabledGen = SourceDSL.booleans().all();
 +        private Gen<KnownCompressionAlgo> algoGen = 
SourceDSL.arbitrary().enumValues(KnownCompressionAlgo.class);
 +        private Gen<Map<String, String>> lz4OptionsGen = rnd -> {
 +            if (nextBoolean(rnd))
 +                return Map.of();
 +            Map<String, String> options = new HashMap<>();
 +            if (nextBoolean(rnd))
 +                options.put(LZ4Compressor.LZ4_COMPRESSOR_TYPE, 
nextBoolean(rnd) ? LZ4Compressor.LZ4_FAST_COMPRESSOR : 
LZ4Compressor.LZ4_HIGH_COMPRESSOR);
 +            if (nextBoolean(rnd))
 +                options.put(LZ4Compressor.LZ4_HIGH_COMPRESSION_LEVEL, 
Integer.toString(Math.toIntExact(rnd.next(Constraint.between(1, 17)))));
 +            return options;
 +        };
 +        private Gen<Map<String, String>> zstdOptionsGen = rnd -> {
 +            if (nextBoolean(rnd))
 +                return Map.of();
 +            int level = 
Math.toIntExact(rnd.next(Constraint.between(ZstdCompressor.FAST_COMPRESSION_LEVEL,
 ZstdCompressor.BEST_COMPRESSION_LEVEL)));
 +            return Map.of(ZstdCompressor.COMPRESSION_LEVEL_OPTION_NAME, 
Integer.toString(level));
 +        };
 +
 +        public Gen<CompressionParams> build()
 +        {
 +            return rnd -> {
 +                if (!enabledGen.generate(rnd))
 +                    return CompressionParams.noCompression();
 +                KnownCompressionAlgo algo = algoGen.generate(rnd);
 +                if (algo == KnownCompressionAlgo.noop)
 +                    return CompressionParams.noop();
 +                // when null disabled
 +                int chunkLength = CompressionParams.DEFAULT_CHUNK_LENGTH;
 +                double minCompressRatio = 
CompressionParams.DEFAULT_MIN_COMPRESS_RATIO;
 +                Map<String, String> options;
 +                switch (algo)
 +                {
 +                    case lz4:
 +                        options = lz4OptionsGen.generate(rnd);
 +                        break;
 +                    case zstd:
 +                        options = zstdOptionsGen.generate(rnd);
 +                        break;
 +                    default:
 +                        options = Map.of();
 +                }
 +                return new CompressionParams(algo.compressor, options, 
chunkLength, minCompressRatio);
 +            };
 +        }
 +    }
 +
 +    public static Gen<CompressionParams> compressionParamsGen()
 +    {
 +        return new CompressionParamsBuilder().build();
 +    }
 +
 +    public static class TableParamsBuilder
 +    {
 +        @Nullable
 +        private Gen<String> memtableKeyGen = null;
 +        @Nullable
 +        private Gen<CachingParams> cachingParamsGen = null;
 +        @Nullable
 +        private Gen<CompactionParams> compactionParamsGen = null;
 +        @Nullable
 +        private Gen<CompressionParams> compressionParamsGen = null;
 +        @Nullable
 +        private Gen<TransactionalMode> transactionalMode = null;
 +        @Nullable
 +        private Gen<FastPathStrategy> fastPathStrategy = null;
 +
 +        public TableParamsBuilder withKnownMemtables()
 +        {
 +            Set<String> known = MemtableParams.knownDefinitions();
 +            // for testing reasons, some invalid types are added; filter out
 +            List<String> valid = known.stream().filter(name -> 
!name.startsWith("test_")).collect(Collectors.toList());
 +            memtableKeyGen = SourceDSL.arbitrary().pick(valid);
 +            return this;
 +        }
 +
 +        public TableParamsBuilder withCaching()
 +        {
 +            cachingParamsGen = cachingParamsGen();
 +            return this;
 +        }
 +
 +        public TableParamsBuilder withCompaction()
 +        {
 +            compactionParamsGen = compactionParamsGen();
 +            return this;
 +        }
 +
 +        public TableParamsBuilder withCompression()
 +        {
 +            compressionParamsGen = compressionParamsGen();
 +            return this;
 +        }
 +
 +        public TableParamsBuilder 
withTransactionalMode(Gen<TransactionalMode> transactionalMode)
 +        {
 +            this.transactionalMode = transactionalMode;
 +            return this;
 +        }
 +
 +        public TableParamsBuilder withTransactionalMode()
 +        {
 +            return 
withTransactionalMode(SourceDSL.arbitrary().enumValues(TransactionalMode.class));
 +        }
 +
 +        public TableParamsBuilder withTransactionalMode(TransactionalMode 
transactionalMode)
 +        {
 +            return 
withTransactionalMode(SourceDSL.arbitrary().constant(transactionalMode));
 +        }
 +
 +        public TableParamsBuilder withFastPathStrategy()
 +        {
 +            fastPathStrategy = rnd -> {
 +                FastPathStrategy.Kind kind = 
SourceDSL.arbitrary().enumValues(FastPathStrategy.Kind.class).generate(rnd);
 +                switch (kind)
 +                {
 +                    case SIMPLE:
 +                        return SimpleFastPathStrategy.instance;
 +                    case INHERIT_KEYSPACE:
 +                        return InheritKeyspaceFastPathStrategy.instance;
 +                    case PARAMETERIZED:
 +                    {
 +                        Map<String, String> map = new HashMap<>();
 +                        int size = SourceDSL.integers().between(1, 
Integer.MAX_VALUE).generate(rnd);
 +                        map.put(ParameterizedFastPathStrategy.SIZE, 
Integer.toString(size));
 +                        Set<String> names = new HashSet<>();
 +                        Gen<String> nameGen = 
SourceDSL.strings().allPossible().ofLengthBetween(1, 10)
 +                                                       // If : is in the name 
then the parser will fail; we have validation to disallow this
 +                                                       .map(s -> 
s.replace(":", "_"))
 +                                                       // Names are used for 
DCs and those are separated by ,
 +                                                       .map(s -> 
s.replace(",", "_"))
 +                                                       .assuming(s -> 
!s.trim().isEmpty());
 +                        // DCs is optional, allow 0 dcs:
 +                        int numNames = SourceDSL.integers().between(0, 
10).generate(rnd);
 +                        for (int i = 0; i < numNames; i++)
 +                        {
 +                            while (!names.add(nameGen.generate(rnd)))
 +                            {
 +                            }
 +                        }
 +                        List<String> sortedNames = new ArrayList<>(names);
 +                        sortedNames.sort(Comparator.naturalOrder());
 +                        List<String> dcs = new ArrayList<>(names.size());
 +                        boolean auto = 
SourceDSL.booleans().all().generate(rnd);
 +                        if (auto)
 +                        {
 +                            dcs.addAll(sortedNames);
 +                        }
 +                        else
 +                        {
 +                            for (String name : sortedNames)
 +                            {
 +                                int weight = SourceDSL.integers().between(0, 
10).generate(rnd);
 +                                dcs.add(name + ":" + weight);
 +                            }
 +                        }
 +                        // str: dcFormat(,dcFormat)*
 +                        //      dcFormat: name | weight
 +                        //      weight: int: >= 0
 +                        //      note: can't mix auto and user defined weight; 
need one or the other.  Names must be unique
 +                        if (!dcs.isEmpty())
 +                            map.put(ParameterizedFastPathStrategy.DCS, 
String.join(",", dcs));
 +                        return ParameterizedFastPathStrategy.fromMap(map);
 +                    }
 +                    default:
 +                        throw new UnsupportedOperationException(kind.name());
 +                }
 +            };
 +            return this;
 +        }
 +
 +        public Gen<TableParams> build()
 +        {
 +            return rnd -> {
 +                TableParams.Builder params = TableParams.builder();
 +                if (memtableKeyGen != null)
 +                    
params.memtable(MemtableParams.get(memtableKeyGen.generate(rnd)));
 +                if (cachingParamsGen != null)
 +                    params.caching(cachingParamsGen.generate(rnd));
 +                if (compactionParamsGen != null)
 +                    params.compaction(compactionParamsGen.generate(rnd));
 +                if (compressionParamsGen != null)
 +                    params.compression(compressionParamsGen.generate(rnd));
 +                if (transactionalMode != null)
 +                    params.transactionalMode(transactionalMode.generate(rnd));
 +                if (fastPathStrategy != null)
 +                    params.fastPath(fastPathStrategy.generate(rnd));
 +                return params.build();
 +            };
 +        }
 +    }
 +
 +    public static TableMetadataBuilder regularTable()
 +    {
 +        return new TableMetadataBuilder()
 +               .withTableKinds(TableMetadata.Kind.REGULAR)
 +               .withKnownMemtables();
 +    }
 +
      public static class TableMetadataBuilder
      {
          private Gen<String> ksNameGen = CassandraGenerators.KEYSPACE_NAME_GEN;


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to