clairemcginty commented on code in PR #3098:
URL: https://github.com/apache/parquet-java/pull/3098#discussion_r1920641849
##########
parquet-hadoop/src/main/java/org/apache/parquet/filter2/dictionarylevel/DictionaryFilter.java:
##########
@@ -493,6 +494,39 @@ public <T extends Comparable<T>> Boolean visit(Contains<T> contains) {
     return contains.filter(this, (l, r) -> l || r, (l, r) -> l && r, v -> BLOCK_MIGHT_MATCH);
   }

+  @Override
+  public Boolean visit(Size size) {
+    ColumnChunkMetaData meta = getColumnChunk(size.getColumn().getColumnPath());
+
+    if (meta == null) {
+      // the column isn't in this file, so fail eq/gt/gte targeting size > 0
+      final boolean blockCannotMatch =
+          size.filter((eq) -> eq > 0, (lt) -> false, (lte) -> false, (gt) -> gt >= 0, (gte) -> gte > 0);
+      return blockCannotMatch ? BLOCK_CANNOT_MATCH : BLOCK_MIGHT_MATCH;
+    }
+
+    try {
+      // We know the block has at least as many array elements as the dictionary sizes
+      final Set<?> dict = expandDictionary(meta);
+      if (dict == null) {
+        return BLOCK_MIGHT_MATCH;
+      }
+      int numDistinctValues = dict.size();
+      final boolean blockCannotMatch = size.filter(
+          (eq) -> eq < numDistinctValues,
+          (lt) -> lt <= numDistinctValues,
+          (lte) -> lte < numDistinctValues,
+          (gt) -> false,
+          (gte) -> false);
+
Review Comment:
   yeah... I guess we can't do much here. The most we can infer is that the row group has >= `dictionary.size()` values spread out over an arbitrary # of elements. So all we could do is rule out `size(0)` predicates if `dictionary.size() > 0`.
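   To make that fallback concrete, here is a minimal sketch of the weaker check the comment describes. It reuses the names from the diff above (`size`, `numDistinctValues`, `BLOCK_CANNOT_MATCH`, `BLOCK_MIGHT_MATCH`) and assumes `size.filter` takes the same five predicates (eq, lt, lte, gt, gte) shown there; it illustrates the comment's reasoning rather than being a vetted replacement for the PR code.

   ```java
   // Sketch only: keep just the inference the comment considers safe. The dictionary
   // tells us the row group holds at least numDistinctValues distinct values, but says
   // nothing about how they are spread across records, so every per-record size bound
   // stays BLOCK_MIGHT_MATCH except an exact size == 0 when the dictionary is non-empty.
   final boolean blockCannotMatch = size.filter(
       (eq) -> eq == 0 && numDistinctValues > 0, // rule out size(0) per the comment's suggestion
       (lt) -> false,   // "size < n": can't bound per-record sizes from the dictionary
       (lte) -> false,  // "size <= n": same
       (gt) -> false,   // "size > n": same
       (gte) -> false); // "size >= n": same
   return blockCannotMatch ? BLOCK_CANNOT_MATCH : BLOCK_MIGHT_MATCH;
   ```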