Hi,
   Could you help with modeling this use case?

   I have the table below. I update the tagids column (set<bigint>) based on
the primary key, and I created a secondary index on tagids so that I can query like this:

SELECT * FROM keyspace.customer_sensor_tagids WHERE tagids CONTAINS 11358097;

This query does a range scan across the cluster, because a secondary index lookup with no partition key restriction has to fan out to every node, and it is causing performance issues.
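
For what it's worth, the scatter-gather is visible when the query is run with tracing enabled in cqlsh:

   TRACING ON
   SELECT * FROM keyspace.customer_sensor_tagids WHERE tagids CONTAINS 11358097;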

If I create a materialized view (MV) on tagids, will I be able to query like the above? Please suggest a data model for this scenario. I appreciate your help.
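
For illustration, here is the kind of denormalized lookup table I was considering as an alternative (the table name sensors_by_tagid is just a placeholder), to be maintained by the application at write time alongside the main table:

   CREATE TABLE keyspace.sensors_by_tagid (
       tagid bigint,
       customerid bigint,
       sensorid bigint,
       PRIMARY KEY (tagid, customerid, sensorid)
   );

   -- the tag lookup then becomes a single-partition read:
   SELECT customerid, sensorid
   FROM keyspace.sensors_by_tagid
   WHERE tagid = 11358097;

The application would have to insert/delete a row here whenever a tag is added to or removed from a sensor's tagids set. Would that be the recommended approach, or is there a better model?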
-----------------------------------------------------------------------------------------------
Example of the tagids set for one row:
   4608831, 608886, 608890, 609164, 615024, 679579, 814791, 830404, 71756,
8538307, 9936868, 10883336, 10954034, 10958062, 10976553, 10976554,
10980255, 11009971, 11043805, 11075379, 11078819, 11167844, 11358097,
11479340, 11481769, 11481770, 11481771, 11481772, 11693597, 11709012,
12193230, 12421500, 12421516, 12421781, 12422011, 12422368, 12422501,
12422512, 12422553, 12422555, 12423381, 12423382


 
-----------------------------------------------------------------------------------------------

   CREATE TABLE keyspace.customer_sensor_tagids (
    customerid bigint,
    sensorid bigint,
    XXX frozen<activity>,
    XXX frozen<asset>,
    XXX text,
    XXX text,
    XXX frozen<hardware>,
    XXX bigint,
    XXX bigint,
    XXX list<frozen<interface>>,
    XXX frozen<inventory>,
    XXX boolean,
    XXX bigint,
    XXX list<frozen<openport>>,
    XXX frozen<operatingsystem>,
    XXX bigint,
    XXX bigint,
    XXX list<frozen<processor>>,
    XXX list<frozen<service>>,
    XXX set<frozen<software>>,
    XXX set<bigint>,
    XXX set<bigint>,
    tagids set<bigint>,
    XXX bigint,
    XXX list<frozen<volume>>,
    PRIMARY KEY ((customerid, sensorid))
) WITH bloom_filter_fp_chance = 0.01
    AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
    AND comment = ''
    AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy',
        'max_threshold': '32', 'min_threshold': '4'}
    AND compression = {'chunk_length_in_kb': '64',
        'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND crc_check_chance = 1.0
    AND dclocal_read_repair_chance = 0.1
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = '99PERCENTILE';
CREATE INDEX XXX ON keyspace.customer_sensor_tagids (values(tagids));
CREATE INDEX XXX ON keyspace.customer_sensor_tagids (values(XXX));
CREATE INDEX XXX ON keyspace.customer_sensor_tagids (XXX);
CREATE INDEX XXX ON keyspace.customer_sensor_tagids (XXX);
CREATE INDEX XXX ON keyspace.customer_sensor_tagids (XXX);
CREATE INDEX XXX ON keyspace.customer_sensor_tagids (XXX);
CREATE INDEX XXX ON keyspace.customer_sensor_tagids (values(XXX));
CREATE INDEX XXX ON keyspace.customer_sensor_tagids (XXX);
