[ https://issues.apache.org/jira/browse/HIVE-25596?focusedWorklogId=675592&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-675592 ]
ASF GitHub Bot logged work on HIVE-25596: ----------------------------------------- Author: ASF GitHub Bot Created on: 04/Nov/21 01:05 Start Date: 04/Nov/21 01:05 Worklog Time Spent: 10m Work Description: hmangla98 commented on a change in pull request #2724: URL: https://github.com/apache/hive/pull/2724#discussion_r741578189 ########## File path: standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql ########## @@ -51,6 +51,10 @@ CREATE TABLE "REPLICATION_METRICS" ( --Increase the size of RM_PROGRESS to accomodate the replication statistics ALTER TABLE "REPLICATION_METRICS" ALTER "RM_PROGRESS" TYPE varchar(24000); +ALTER TABLE "REPLICATION_METRICS" ALTER "RM_PROGRESS" TYPE varchar(10000); Review comment: This is tested as part of ITestPostgres. ########## File path: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDeserialize.java ########## @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.udf.generic; + +import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder; +import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder; +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; + +/** + * GenericUDFDeserializeString. + * + */ +@Description(name = "deserialize", + value="_FUNC_(message, encodingFormat) - Returns deserialized string of encoded message.", + extended="Example:\n" + + " > SELECT _FUNC_('H4sIAAAAAAAA/ytJLS4BAAx+f9gEAAAA', 'gzip(json-2.0)') FROM src LIMIT 1;\n" + + " test") +public class GenericUDFDeserialize extends GenericUDF { + + private static final int ARG_COUNT = 2; // Number of arguments to this UDF + private static final String FUNC_NAME = "deserialize"; // External Name + + private transient PrimitiveObjectInspector stringOI = null; + private transient PrimitiveObjectInspector encodingFormat = null; + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) + throws UDFArgumentException { + if (arguments.length != ARG_COUNT) { + throw new UDFArgumentException("The function " + FUNC_NAME + " accepts " + ARG_COUNT + " arguments."); + } + for (ObjectInspector arg: arguments) { + if (arg.getCategory() != ObjectInspector.Category.PRIMITIVE || + PrimitiveObjectInspectorUtils.PrimitiveGrouping.STRING_GROUP != PrimitiveObjectInspectorUtils.getPrimitiveGrouping( + ((PrimitiveObjectInspector)arg).getPrimitiveCategory())){ + throw 
new UDFArgumentTypeException(0, "The arguments to " + FUNC_NAME + " must be a string/varchar"); + } + } + stringOI = (PrimitiveObjectInspector) arguments[0]; + encodingFormat = (PrimitiveObjectInspector) arguments[1]; + return PrimitiveObjectInspectorFactory.javaStringObjectInspector; + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + String value = PrimitiveObjectInspectorUtils.getString(arguments[0].get(), stringOI); + String messageFormat = PrimitiveObjectInspectorUtils.getString(arguments[1].get(), encodingFormat); + if (value == null) { + return null; + } else if (messageFormat == null || messageFormat.isEmpty() || JSONMessageEncoder.FORMAT.equalsIgnoreCase(value)) { + return value; + } else if (GzipJSONMessageEncoder.FORMAT.equalsIgnoreCase(messageFormat)) { + return GzipJSONMessageEncoder.getInstance().getDeserializer().deSerializeGenericString(value); + } else { + throw new HiveException("Invalid message format provided: " + messageFormat + " for message: " + value); Review comment: Already included in TestGenericUDFDeserialize#testInvalidMessageString ########## File path: ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricSink.java ########## @@ -116,14 +117,17 @@ public void run() { int totalMetricsSize = metrics.size(); List<ReplicationMetrics> replicationMetricsList = new ArrayList<>(totalMetricsSize); ObjectMapper mapper = new ObjectMapper(); + MessageEncoder encoder = MessageFactory.getDefaultInstanceForReplMetrics(conf); + MessageSerializer serializer = encoder.getSerializer(); for (int index = 0; index < totalMetricsSize; index++) { ReplicationMetric metric = metrics.removeFirst(); ReplicationMetrics persistentMetric = new ReplicationMetrics(); persistentMetric.setDumpExecutionId(metric.getDumpExecutionId()); persistentMetric.setScheduledExecutionId(metric.getScheduledExecutionId()); persistentMetric.setPolicy(metric.getPolicy()); - 
persistentMetric.setProgress(mapper.writeValueAsString(metric.getProgress())); - persistentMetric.setMetadata(mapper.writeValueAsString(metric.getMetadata())); + persistentMetric.setProgress(serializer.serialize(mapper.writeValueAsString(metric.getProgress()))); + persistentMetric.setMetadata(serializer.serialize(mapper.writeValueAsString(metric.getMetadata()))); Review comment: Tested with one such sample metadata entry. Plain text was 234 bytes; with compression, the output string was 209 bytes. ########## File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java ########## @@ -36,7 +36,7 @@ public class ReplStatsTracker { // Maintains the length of the RM_Progress column in the RDBMS, which stores the ReplStats - public static int RM_PROGRESS_LENGTH = 24000; + public static int RM_PROGRESS_LENGTH = 10000; Review comment: Attached the sample outputs and size improvements in the comment section. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: gitbox-unsubscr...@hive.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org Issue Time Tracking ------------------- Worklog Id: (was: 675592) Time Spent: 4h 20m (was: 4h 10m) > Compress Hive Replication Metrics while storing > ----------------------------------------------- > > Key: HIVE-25596 > URL: https://issues.apache.org/jira/browse/HIVE-25596 > Project: Hive > Issue Type: Improvement > Reporter: Haymant Mangla > Assignee: Haymant Mangla > Priority: Major > Labels: pull-request-available > Attachments: CompressedRM_Progress(k=10), CompressedRM_Progress(k=5), > PlainTextRM_Progress(k=10), PlainTextRM_Progress(k=5) > > Time Spent: 4h 20m > Remaining Estimate: 0h > > Compress the JSON fields of the sys.replication_metrics table to optimise RDBMS > space usage. -- This message was sent by Atlassian Jira (v8.3.4#803005)