nsivabalan commented on a change in pull request #5076: URL: https://github.com/apache/hudi/pull/5076#discussion_r835971403
########## File path: hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/AbstractHiveSyncHoodieClient.java ########## @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hudi.hive; + +import org.apache.hudi.common.fs.FSUtils; +import org.apache.hudi.common.table.TableSchemaResolver; +import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.util.ReflectionUtils; +import org.apache.hudi.sync.common.AbstractSyncHoodieClient; +import org.apache.hudi.sync.common.HoodieSyncException; +import org.apache.hudi.sync.common.model.Partition; + +import org.apache.avro.Schema; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.parquet.schema.MessageType; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Base class to sync Hudi tables with Hive based metastores, such as Hive server, HMS or managed Hive services. + */ +public abstract class AbstractHiveSyncHoodieClient extends AbstractSyncHoodieClient { + + protected final HoodieTimeline activeTimeline; + protected final HiveSyncConfig syncConfig; + protected final Configuration hadoopConf; + protected final PartitionValueExtractor partitionValueExtractor; + + public AbstractHiveSyncHoodieClient(HiveSyncConfig syncConfig, Configuration hadoopConf, FileSystem fs) { + super(syncConfig.basePath, syncConfig.assumeDatePartitioning, syncConfig.useFileListingFromMetadata, syncConfig.withOperationField, fs); + this.syncConfig = syncConfig; + this.hadoopConf = hadoopConf; + this.partitionValueExtractor = ReflectionUtils.loadClass(syncConfig.partitionValueExtractorClass); + this.activeTimeline = metaClient.getActiveTimeline().getCommitsTimeline().filterCompletedInstants(); + } + + public HoodieTimeline getActiveTimeline() { + return activeTimeline; + } + + /** + * Iterate over the storage partitions and find if there are any new partitions that need to be added or updated. + * Generate a list of PartitionEvent based on the changes required. 
+ */ + protected List<PartitionEvent> getPartitionEvents(List<Partition> tablePartitions, List<String> partitionStoragePartitions, boolean isDropPartition) { + Map<String, String> paths = new HashMap<>(); + for (Partition tablePartition : tablePartitions) { + List<String> hivePartitionValues = tablePartition.getValues(); + String fullTablePartitionPath = + Path.getPathWithoutSchemeAndAuthority(new Path(tablePartition.getStorageLocation())).toUri().getPath(); + paths.put(String.join(", ", hivePartitionValues), fullTablePartitionPath); + } + + List<PartitionEvent> events = new ArrayList<>(); + for (String storagePartition : partitionStoragePartitions) { + Path storagePartitionPath = FSUtils.getPartitionPath(syncConfig.basePath, storagePartition); + String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath(); + // Check if the partition values or if hdfs path is the same + List<String> storagePartitionValues = partitionValueExtractor.extractPartitionValuesInPath(storagePartition); + + if (isDropPartition) { + events.add(PartitionEvent.newPartitionDropEvent(storagePartition)); + } else { + if (!storagePartitionValues.isEmpty()) { + String storageValue = String.join(", ", storagePartitionValues); + if (!paths.containsKey(storageValue)) { + events.add(PartitionEvent.newPartitionAddEvent(storagePartition)); + } else if (!paths.get(storageValue).equals(fullStoragePartitionPath)) { + events.add(PartitionEvent.newPartitionUpdateEvent(storagePartition)); + } + } + } + } + return events; + } + + public abstract List<Partition> getAllPartitions(String tableName); + + public abstract boolean databaseExists(String databaseName); + + public abstract void createDatabase(String databaseName); + + public abstract void updateTableDefinition(String tableName, MessageType newSchema); + + /* + * APIs below need to be re-worked by modeling field comment in hudi-sync-common, + * instead of relying on Avro or Hive schema class. + */ + + public Schema getAvroSchemaWithoutMetadataFields() { Review comment: can this be protected ? ########## File path: hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogClient.java ########## @@ -0,0 +1,483 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hudi.aws.sync; + +import org.apache.hudi.common.fs.FSUtils; +import org.apache.hudi.common.util.CollectionUtils; +import org.apache.hudi.common.util.Option; +import org.apache.hudi.hive.AbstractHiveSyncHoodieClient; +import org.apache.hudi.hive.HiveSyncConfig; +import org.apache.hudi.sync.common.model.Partition; + +import com.amazonaws.services.glue.AWSGlue; +import com.amazonaws.services.glue.AWSGlueClientBuilder; +import com.amazonaws.services.glue.model.AlreadyExistsException; +import com.amazonaws.services.glue.model.BatchCreatePartitionRequest; +import com.amazonaws.services.glue.model.BatchCreatePartitionResult; +import com.amazonaws.services.glue.model.BatchUpdatePartitionRequest; +import com.amazonaws.services.glue.model.BatchUpdatePartitionRequestEntry; +import com.amazonaws.services.glue.model.BatchUpdatePartitionResult; +import com.amazonaws.services.glue.model.Column; +import com.amazonaws.services.glue.model.CreateDatabaseRequest; +import com.amazonaws.services.glue.model.CreateDatabaseResult; +import com.amazonaws.services.glue.model.CreateTableRequest; +import com.amazonaws.services.glue.model.CreateTableResult; +import com.amazonaws.services.glue.model.DatabaseInput; +import com.amazonaws.services.glue.model.EntityNotFoundException; +import com.amazonaws.services.glue.model.GetDatabaseRequest; +import com.amazonaws.services.glue.model.GetPartitionsRequest; +import com.amazonaws.services.glue.model.GetPartitionsResult; +import com.amazonaws.services.glue.model.GetTableRequest; +import com.amazonaws.services.glue.model.PartitionInput; +import com.amazonaws.services.glue.model.SerDeInfo; +import com.amazonaws.services.glue.model.StorageDescriptor; +import com.amazonaws.services.glue.model.Table; +import com.amazonaws.services.glue.model.TableInput; +import com.amazonaws.services.glue.model.UpdateTableRequest; +import org.apache.avro.Schema; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.parquet.schema.MessageType; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.apache.hudi.aws.utils.S3Utils.s3aToS3; +import static org.apache.hudi.common.util.MapUtils.nonEmpty; +import static org.apache.hudi.hive.util.HiveSchemaUtil.getPartitionKeyType; +import static org.apache.hudi.hive.util.HiveSchemaUtil.parquetSchemaToMapSchema; +import static org.apache.hudi.sync.common.util.TableUtils.tableId; + +/** + * This class implements all the AWS APIs to enable syncing of a Hudi Table with the + * AWS Glue Data Catalog (https://docs.aws.amazon.com/glue/latest/dg/populate-data-catalog.html). 
+ */ +public class AWSGlueCatalogClient extends AbstractHiveSyncHoodieClient { + + private static final Logger LOG = LogManager.getLogger(AWSGlueCatalogClient.class); + private static final int MAX_PARTITIONS_PER_REQUEST = 100; + private static final int BATCH_REQUEST_SLEEP_SECONDS = 1; + private final AWSGlue awsGlue; + private final String databaseName; + + public AWSGlueCatalogClient(HiveSyncConfig syncConfig, Configuration hadoopConf, FileSystem fs) { + super(syncConfig, hadoopConf, fs); + this.awsGlue = AWSGlueClientBuilder.standard().build(); + this.databaseName = syncConfig.databaseName; + } + + @Override + public List<Partition> getAllPartitions(String tableName) { + try { + GetPartitionsRequest request = new GetPartitionsRequest(); + request.withDatabaseName(databaseName).withTableName(tableName); + GetPartitionsResult result = awsGlue.getPartitions(request); + return result.getPartitions() + .stream() + .map(p -> new Partition(p.getValues(), p.getStorageDescriptor().getLocation())) + .collect(Collectors.toList()); + } catch (Exception e) { + throw new HoodieGlueSyncException("Failed to get all partitions for table " + tableId(databaseName, tableName), e); + } + } + + @Override + public void addPartitionsToTable(String tableName, List<String> partitionsToAdd) { + if (partitionsToAdd.isEmpty()) { + LOG.info("No partitions to add for " + tableId(databaseName, tableName)); + return; + } + LOG.info("Adding " + partitionsToAdd.size() + " partition(s) in table " + tableId(databaseName, tableName)); + try { + Table table = getTable(awsGlue, databaseName, tableName); + StorageDescriptor sd = table.getStorageDescriptor(); + List<PartitionInput> partitionInputs = partitionsToAdd.stream().map(partition -> { + StorageDescriptor partitionSd = sd.clone(); + String fullPartitionPath = FSUtils.getPartitionPath(syncConfig.basePath, partition).toString(); + List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition); + partitionSd.setLocation(fullPartitionPath); + return new PartitionInput().withValues(partitionValues).withStorageDescriptor(partitionSd); + }).collect(Collectors.toList()); + + for (List<PartitionInput> batch : CollectionUtils.batches(partitionInputs, MAX_PARTITIONS_PER_REQUEST)) { + BatchCreatePartitionRequest request = new BatchCreatePartitionRequest(); + request.withDatabaseName(databaseName).withTableName(tableName).withPartitionInputList(batch); + + BatchCreatePartitionResult result = awsGlue.batchCreatePartition(request); + if (CollectionUtils.nonEmpty(result.getErrors())) { + throw new HoodieGlueSyncException("Fail to add partitions to " + tableId(databaseName, tableName) + + " with error(s): " + result.getErrors()); + } + Thread.sleep(BATCH_REQUEST_SLEEP_SECONDS); + } + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to add partitions to " + tableId(databaseName, tableName), e); + } + } + + @Override + public void updatePartitionsToTable(String tableName, List<String> changedPartitions) { + if (changedPartitions.isEmpty()) { + LOG.info("No partitions to change for " + tableName); + return; + } + LOG.info("Updating " + changedPartitions.size() + "partition(s) in table " + tableId(databaseName, tableName)); + try { + Table table = getTable(awsGlue, databaseName, tableName); + StorageDescriptor sd = table.getStorageDescriptor(); + List<BatchUpdatePartitionRequestEntry> updatePartitionEntries = changedPartitions.stream().map(partition -> { + StorageDescriptor partitionSd = sd.clone(); + String fullPartitionPath = 
FSUtils.getPartitionPath(syncConfig.basePath, partition).toString(); + List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition); + sd.setLocation(fullPartitionPath); + PartitionInput partitionInput = new PartitionInput().withValues(partitionValues).withStorageDescriptor(partitionSd); + return new BatchUpdatePartitionRequestEntry().withPartitionInput(partitionInput).withPartitionValueList(partitionValues); + }).collect(Collectors.toList()); + + for (List<BatchUpdatePartitionRequestEntry> batch : CollectionUtils.batches(updatePartitionEntries, MAX_PARTITIONS_PER_REQUEST)) { + BatchUpdatePartitionRequest request = new BatchUpdatePartitionRequest(); + request.withDatabaseName(databaseName).withTableName(tableName).withEntries(batch); + + BatchUpdatePartitionResult result = awsGlue.batchUpdatePartition(request); + if (CollectionUtils.nonEmpty(result.getErrors())) { + throw new HoodieGlueSyncException("Fail to update partitions to " + tableId(databaseName, tableName) + + " with error(s): " + result.getErrors()); + } + Thread.sleep(BATCH_REQUEST_SLEEP_SECONDS); + } + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to update partitions to " + tableId(databaseName, tableName), e); + } + } + + @Override + public void dropPartitionsToTable(String tableName, List<String> partitionsToDrop) { + throw new UnsupportedOperationException("Not support dropPartitionsToTable yet."); + } + + /** + * Update the table properties to the table. + */ + @Override + public void updateTableProperties(String tableName, Map<String, String> tableProperties) { + if (nonEmpty(tableProperties)) { + return; + } + try { + updateTableParameters(awsGlue, databaseName, tableName, tableProperties, true); + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to update properties for table " + tableId(databaseName, tableName), e); + } + } + + @Override + public void updateTableDefinition(String tableName, MessageType newSchema) { + // ToDo Cascade is set in Hive meta sync, but need to investigate how to configure it for Glue meta + boolean cascade = syncConfig.partitionFields.size() > 0; + try { + Table table = getTable(awsGlue, databaseName, tableName); + Map<String, String> newSchemaMap = parquetSchemaToMapSchema(newSchema, syncConfig.supportTimestamp, false); + List<Column> newColumns = newSchemaMap.keySet().stream().map(key -> { + String keyType = getPartitionKeyType(newSchemaMap, key); + return new Column().withName(key).withType(keyType.toLowerCase()).withComment(""); + }).collect(Collectors.toList()); + StorageDescriptor sd = table.getStorageDescriptor(); + sd.setColumns(newColumns); + + final Date now = new Date(); + TableInput updatedTableInput = new TableInput() + .withName(tableName) + .withTableType(table.getTableType()) + .withParameters(table.getParameters()) + .withPartitionKeys(table.getPartitionKeys()) + .withStorageDescriptor(sd) + .withLastAccessTime(now) + .withLastAnalyzedTime(now); + + UpdateTableRequest request = new UpdateTableRequest() + .withDatabaseName(databaseName) + .withTableInput(updatedTableInput); + + awsGlue.updateTable(request); + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to update definition for table " + tableId(databaseName, tableName), e); + } + } + + @Override + public List<FieldSchema> getTableCommentUsingMetastoreClient(String tableName) { + // no op; unsupported + return Collections.emptyList(); + } + + @Override + public void updateTableComments(String tableName, List<FieldSchema> oldSchema, 
List<Schema.Field> newSchema) { + // no op; unsupported + } + + @Override + public void updateTableComments(String tableName, List<FieldSchema> oldSchema, Map<String, String> newComments) { + // no op; unsupported + } + + @Override + public void createTable(String tableName, + MessageType storageSchema, + String inputFormatClass, + String outputFormatClass, + String serdeClass, + Map<String, String> serdeProperties, + Map<String, String> tableProperties) { + if (tableExists(tableName)) { + return; + } + CreateTableRequest request = new CreateTableRequest(); + Map<String, String> params = new HashMap<>(); + if (!syncConfig.createManagedTable) { + params.put("EXTERNAL", "TRUE"); + } + params.putAll(tableProperties); + + try { + Map<String, String> mapSchema = parquetSchemaToMapSchema(storageSchema, syncConfig.supportTimestamp, false); + + List<Column> schemaPartitionKeys = new ArrayList<>(); + List<Column> schemaWithoutPartitionKeys = new ArrayList<>(); + for (String key : mapSchema.keySet()) { + String keyType = getPartitionKeyType(mapSchema, key); + Column column = new Column().withName(key).withType(keyType.toLowerCase()).withComment(""); + // In Glue, the full schema should exclude the partition keys + if (syncConfig.partitionFields.contains(key)) { + schemaPartitionKeys.add(column); + } else { + schemaWithoutPartitionKeys.add(column); + } + } + + StorageDescriptor storageDescriptor = new StorageDescriptor(); + serdeProperties.put("serialization.format", "1"); + storageDescriptor + .withSerdeInfo(new SerDeInfo().withSerializationLibrary(serdeClass).withParameters(serdeProperties)) + .withLocation(s3aToS3(syncConfig.basePath)) + .withInputFormat(inputFormatClass) + .withOutputFormat(outputFormatClass) + .withColumns(schemaWithoutPartitionKeys); + + TableInput tableInput = new TableInput() + .withName(tableName) + .withTableType(TableType.EXTERNAL_TABLE.toString()) + .withParameters(params) + .withPartitionKeys(schemaPartitionKeys) + .withStorageDescriptor(storageDescriptor) + .withLastAccessTime(new Date(System.currentTimeMillis())) + .withLastAnalyzedTime(new Date(System.currentTimeMillis())); + request.withDatabaseName(databaseName) + .withTableInput(tableInput); + + CreateTableResult result = awsGlue.createTable(request); + LOG.info("Created table " + tableId(databaseName, tableName) + " : " + result); + } catch (AlreadyExistsException e) { + LOG.warn("Table " + tableId(databaseName, tableName) + " already exists.", e); + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to create " + tableId(databaseName, tableName), e); + } + } + + @Override + public Map<String, String> getTableSchema(String tableName) { + try { + // GlueMetastoreClient returns partition keys separate from Columns, hence get both and merge to + // get the Schema of the table. + final long start = System.currentTimeMillis(); + Table table = getTable(awsGlue, databaseName, tableName); Review comment: In HoodieHiveClient first we check tableExists() and then proceed further. Do you want to follow the same pattern here. will let you take the call. ########## File path: hudi-aws/src/main/java/org/apache/hudi/aws/sync/AwsGlueCatalogSyncTool.java ########## @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.aws.sync;
+
+import org.apache.hudi.common.config.TypedProperties;
+import org.apache.hudi.common.fs.FSUtils;
+import org.apache.hudi.hive.HiveSyncConfig;
+import org.apache.hudi.hive.HiveSyncTool;
+
+import com.beust.jcommander.JCommander;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.conf.HiveConf;
+
+/**
+ * Currently Experimental. Utility class that implements syncing a Hudi Table with the
+ * AWS Glue Data Catalog (https://docs.aws.amazon.com/glue/latest/dg/populate-data-catalog.html)
+ * to enable querying via Glue ETLs, Athena etc.
+ *
+ * Extends HiveSyncTool since most logic is similar to Hive syncing,
+ * except that it uses a different client {@link AWSGlueCatalogClient} that implements
+ * the necessary functionality using Glue APIs.
+ */
+public class AwsGlueCatalogSyncTool extends HiveSyncTool {

Review comment:
       Experimental annotation?
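A marker annotation is presumably what the comment is after. Below is a minimal sketch of what such an annotation could look like; the package name, retention policy, and targets are illustrative assumptions, not taken from the Hudi codebase or this PR:

    package org.apache.hudi.common.annotation; // illustrative location, not confirmed by the PR

    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    /**
     * Marks an API as experimental: it may change incompatibly, or be removed, in a future release.
     */
    @Documented
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE, ElementType.METHOD})
    public @interface Experimental {
    }

With something like that in place, the declaration would read "@Experimental public class AwsGlueCatalogSyncTool extends HiveSyncTool", turning the "Currently Experimental" Javadoc note into something tools can detect.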
##########
File path: hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogClient.java
##########
+  @Override
+  public Map<String, String> getTableSchema(String tableName) {
+    try {
+      // GlueMetastoreClient returns partition keys separate from Columns, hence get both and merge to
+      // get the Schema of the table.
+      final long start = System.currentTimeMillis();
+      Table table = getTable(awsGlue, databaseName, tableName);
+      Map<String, String> partitionKeysMap =
+          table.getPartitionKeys().stream().collect(Collectors.toMap(Column::getName, f -> f.getType().toUpperCase()));
+
+      Map<String, String> columnsMap =
+          table.getStorageDescriptor().getColumns().stream().collect(Collectors.toMap(Column::getName, f -> f.getType().toUpperCase()));
+
+      Map<String, String> schema = new HashMap<>();
+      schema.putAll(columnsMap);
+      schema.putAll(partitionKeysMap);
+      final long end = System.currentTimeMillis();
+      LOG.info(String.format("Time taken to getTableSchema: %s ms", (end - start)));

Review comment:
       Any specific reason to log the time taken just for this method and not anywhere else?
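If per-call latency logging is worth keeping, one option the comment points toward is making it uniform rather than one-off. Here is a minimal sketch of a reusable wrapper, assuming plain JDK types only (Hudi's own HoodieTimer utility, if preferred, could likely fill the same role):

    import java.util.function.Supplier;

    public final class TimedCall {

      private TimedCall() {
      }

      /** Runs the given operation and logs how long it took, whether it succeeds or throws. */
      public static <T> T logDuration(String opName, Supplier<T> op) {
        final long start = System.currentTimeMillis();
        try {
          return op.get();
        } finally {
          System.out.println(String.format("Time taken to %s: %s ms", opName, System.currentTimeMillis() - start));
        }
      }
    }

getTableSchema could then reduce to Map<String, String> schema = TimedCall.logDuration("getTableSchema", () -> fetchSchema(tableName)), where fetchSchema is a hypothetical private helper holding the current method body, and the same wrapper would cover every other Glue call.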
##########
File path: hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/AbstractHiveSyncHoodieClient.java
##########
+  public abstract List<Partition> getAllPartitions(String tableName);
+
+  public abstract boolean databaseExists(String databaseName);
+
+  public abstract void createDatabase(String databaseName);

Review comment:
       Can we add some Javadocs, maybe in a follow-up PR? Some methods are pretty self-explanatory, but for some it would be good to add some details.
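For illustration, this is the kind of Javadoc the comment asks for on the less self-explanatory methods. A sketch only: the wording is not taken from the PR, and the nested Partition stub stands in for org.apache.hudi.sync.common.model.Partition:

    import java.util.List;

    public abstract class MetastoreClientDocSketch {

      /** Simplified stand-in for org.apache.hudi.sync.common.model.Partition. */
      public static class Partition {
      }

      /**
       * Fetches all partitions currently registered in the target metastore for the given table.
       *
       * @param tableName name of the table whose partitions should be listed
       * @return the registered partitions, each carrying its partition values and storage location
       */
      public abstract List<Partition> getAllPartitions(String tableName);

      /**
       * Creates the named database in the target metastore. Implementations are expected to be
       * idempotent: calling this when the database already exists must not fail the sync.
       *
       * @param databaseName name of the database to create
       */
      public abstract void createDatabase(String databaseName);
    }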
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hudi.aws.sync; + +import org.apache.hudi.common.fs.FSUtils; +import org.apache.hudi.common.util.CollectionUtils; +import org.apache.hudi.common.util.Option; +import org.apache.hudi.hive.AbstractHiveSyncHoodieClient; +import org.apache.hudi.hive.HiveSyncConfig; +import org.apache.hudi.sync.common.model.Partition; + +import com.amazonaws.services.glue.AWSGlue; +import com.amazonaws.services.glue.AWSGlueClientBuilder; +import com.amazonaws.services.glue.model.AlreadyExistsException; +import com.amazonaws.services.glue.model.BatchCreatePartitionRequest; +import com.amazonaws.services.glue.model.BatchCreatePartitionResult; +import com.amazonaws.services.glue.model.BatchUpdatePartitionRequest; +import com.amazonaws.services.glue.model.BatchUpdatePartitionRequestEntry; +import com.amazonaws.services.glue.model.BatchUpdatePartitionResult; +import com.amazonaws.services.glue.model.Column; +import com.amazonaws.services.glue.model.CreateDatabaseRequest; +import com.amazonaws.services.glue.model.CreateDatabaseResult; +import com.amazonaws.services.glue.model.CreateTableRequest; +import com.amazonaws.services.glue.model.CreateTableResult; +import com.amazonaws.services.glue.model.DatabaseInput; +import com.amazonaws.services.glue.model.EntityNotFoundException; +import com.amazonaws.services.glue.model.GetDatabaseRequest; +import com.amazonaws.services.glue.model.GetPartitionsRequest; +import com.amazonaws.services.glue.model.GetPartitionsResult; +import com.amazonaws.services.glue.model.GetTableRequest; +import com.amazonaws.services.glue.model.PartitionInput; +import com.amazonaws.services.glue.model.SerDeInfo; +import com.amazonaws.services.glue.model.StorageDescriptor; +import com.amazonaws.services.glue.model.Table; +import com.amazonaws.services.glue.model.TableInput; +import com.amazonaws.services.glue.model.UpdateTableRequest; +import org.apache.avro.Schema; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.parquet.schema.MessageType; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.apache.hudi.aws.utils.S3Utils.s3aToS3; +import static org.apache.hudi.common.util.MapUtils.nonEmpty; +import static org.apache.hudi.hive.util.HiveSchemaUtil.getPartitionKeyType; +import static org.apache.hudi.hive.util.HiveSchemaUtil.parquetSchemaToMapSchema; +import static org.apache.hudi.sync.common.util.TableUtils.tableId; + +/** + * This class implements all the AWS APIs to enable syncing of a Hudi Table with the + * AWS Glue Data Catalog (https://docs.aws.amazon.com/glue/latest/dg/populate-data-catalog.html). 
+ */ +public class AWSGlueCatalogClient extends AbstractHiveSyncHoodieClient { + + private static final Logger LOG = LogManager.getLogger(AWSGlueCatalogClient.class); + private static final int MAX_PARTITIONS_PER_REQUEST = 100; + private static final int BATCH_REQUEST_SLEEP_SECONDS = 1; + private final AWSGlue awsGlue; + private final String databaseName; + + public AWSGlueCatalogClient(HiveSyncConfig syncConfig, Configuration hadoopConf, FileSystem fs) { + super(syncConfig, hadoopConf, fs); + this.awsGlue = AWSGlueClientBuilder.standard().build(); + this.databaseName = syncConfig.databaseName; + } + + @Override + public List<Partition> getAllPartitions(String tableName) { + try { + GetPartitionsRequest request = new GetPartitionsRequest(); + request.withDatabaseName(databaseName).withTableName(tableName); + GetPartitionsResult result = awsGlue.getPartitions(request); + return result.getPartitions() + .stream() + .map(p -> new Partition(p.getValues(), p.getStorageDescriptor().getLocation())) + .collect(Collectors.toList()); + } catch (Exception e) { + throw new HoodieGlueSyncException("Failed to get all partitions for table " + tableId(databaseName, tableName), e); + } + } + + @Override + public void addPartitionsToTable(String tableName, List<String> partitionsToAdd) { + if (partitionsToAdd.isEmpty()) { + LOG.info("No partitions to add for " + tableId(databaseName, tableName)); + return; + } + LOG.info("Adding " + partitionsToAdd.size() + " partition(s) in table " + tableId(databaseName, tableName)); + try { + Table table = getTable(awsGlue, databaseName, tableName); + StorageDescriptor sd = table.getStorageDescriptor(); + List<PartitionInput> partitionInputs = partitionsToAdd.stream().map(partition -> { + StorageDescriptor partitionSd = sd.clone(); + String fullPartitionPath = FSUtils.getPartitionPath(syncConfig.basePath, partition).toString(); + List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition); + partitionSd.setLocation(fullPartitionPath); + return new PartitionInput().withValues(partitionValues).withStorageDescriptor(partitionSd); + }).collect(Collectors.toList()); + + for (List<PartitionInput> batch : CollectionUtils.batches(partitionInputs, MAX_PARTITIONS_PER_REQUEST)) { + BatchCreatePartitionRequest request = new BatchCreatePartitionRequest(); + request.withDatabaseName(databaseName).withTableName(tableName).withPartitionInputList(batch); + + BatchCreatePartitionResult result = awsGlue.batchCreatePartition(request); + if (CollectionUtils.nonEmpty(result.getErrors())) { + throw new HoodieGlueSyncException("Fail to add partitions to " + tableId(databaseName, tableName) + + " with error(s): " + result.getErrors()); + } + Thread.sleep(BATCH_REQUEST_SLEEP_SECONDS); + } + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to add partitions to " + tableId(databaseName, tableName), e); + } + } + + @Override + public void updatePartitionsToTable(String tableName, List<String> changedPartitions) { + if (changedPartitions.isEmpty()) { + LOG.info("No partitions to change for " + tableName); + return; + } + LOG.info("Updating " + changedPartitions.size() + "partition(s) in table " + tableId(databaseName, tableName)); + try { + Table table = getTable(awsGlue, databaseName, tableName); + StorageDescriptor sd = table.getStorageDescriptor(); + List<BatchUpdatePartitionRequestEntry> updatePartitionEntries = changedPartitions.stream().map(partition -> { + StorageDescriptor partitionSd = sd.clone(); + String fullPartitionPath = 
FSUtils.getPartitionPath(syncConfig.basePath, partition).toString(); + List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition); + sd.setLocation(fullPartitionPath); + PartitionInput partitionInput = new PartitionInput().withValues(partitionValues).withStorageDescriptor(partitionSd); + return new BatchUpdatePartitionRequestEntry().withPartitionInput(partitionInput).withPartitionValueList(partitionValues); + }).collect(Collectors.toList()); + + for (List<BatchUpdatePartitionRequestEntry> batch : CollectionUtils.batches(updatePartitionEntries, MAX_PARTITIONS_PER_REQUEST)) { + BatchUpdatePartitionRequest request = new BatchUpdatePartitionRequest(); + request.withDatabaseName(databaseName).withTableName(tableName).withEntries(batch); + + BatchUpdatePartitionResult result = awsGlue.batchUpdatePartition(request); + if (CollectionUtils.nonEmpty(result.getErrors())) { + throw new HoodieGlueSyncException("Fail to update partitions to " + tableId(databaseName, tableName) + + " with error(s): " + result.getErrors()); + } + Thread.sleep(BATCH_REQUEST_SLEEP_SECONDS); + } + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to update partitions to " + tableId(databaseName, tableName), e); + } + } + + @Override + public void dropPartitionsToTable(String tableName, List<String> partitionsToDrop) { + throw new UnsupportedOperationException("Not support dropPartitionsToTable yet."); + } + + /** + * Update the table properties to the table. + */ + @Override + public void updateTableProperties(String tableName, Map<String, String> tableProperties) { + if (nonEmpty(tableProperties)) { + return; + } + try { + updateTableParameters(awsGlue, databaseName, tableName, tableProperties, true); + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to update properties for table " + tableId(databaseName, tableName), e); + } + } + + @Override + public void updateTableDefinition(String tableName, MessageType newSchema) { + // ToDo Cascade is set in Hive meta sync, but need to investigate how to configure it for Glue meta + boolean cascade = syncConfig.partitionFields.size() > 0; + try { + Table table = getTable(awsGlue, databaseName, tableName); + Map<String, String> newSchemaMap = parquetSchemaToMapSchema(newSchema, syncConfig.supportTimestamp, false); + List<Column> newColumns = newSchemaMap.keySet().stream().map(key -> { + String keyType = getPartitionKeyType(newSchemaMap, key); + return new Column().withName(key).withType(keyType.toLowerCase()).withComment(""); + }).collect(Collectors.toList()); + StorageDescriptor sd = table.getStorageDescriptor(); + sd.setColumns(newColumns); + + final Date now = new Date(); + TableInput updatedTableInput = new TableInput() + .withName(tableName) + .withTableType(table.getTableType()) + .withParameters(table.getParameters()) + .withPartitionKeys(table.getPartitionKeys()) + .withStorageDescriptor(sd) + .withLastAccessTime(now) + .withLastAnalyzedTime(now); + + UpdateTableRequest request = new UpdateTableRequest() + .withDatabaseName(databaseName) + .withTableInput(updatedTableInput); + + awsGlue.updateTable(request); + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to update definition for table " + tableId(databaseName, tableName), e); + } + } + + @Override + public List<FieldSchema> getTableCommentUsingMetastoreClient(String tableName) { + // no op; unsupported + return Collections.emptyList(); + } + + @Override + public void updateTableComments(String tableName, List<FieldSchema> oldSchema, 
List<Schema.Field> newSchema) { + // no op; unsupported + } + + @Override + public void updateTableComments(String tableName, List<FieldSchema> oldSchema, Map<String, String> newComments) { + // no op; unsupported + } + + @Override + public void createTable(String tableName, + MessageType storageSchema, + String inputFormatClass, + String outputFormatClass, + String serdeClass, + Map<String, String> serdeProperties, + Map<String, String> tableProperties) { + if (tableExists(tableName)) { + return; + } + CreateTableRequest request = new CreateTableRequest(); + Map<String, String> params = new HashMap<>(); + if (!syncConfig.createManagedTable) { + params.put("EXTERNAL", "TRUE"); + } + params.putAll(tableProperties); + + try { + Map<String, String> mapSchema = parquetSchemaToMapSchema(storageSchema, syncConfig.supportTimestamp, false); + + List<Column> schemaPartitionKeys = new ArrayList<>(); + List<Column> schemaWithoutPartitionKeys = new ArrayList<>(); + for (String key : mapSchema.keySet()) { + String keyType = getPartitionKeyType(mapSchema, key); + Column column = new Column().withName(key).withType(keyType.toLowerCase()).withComment(""); + // In Glue, the full schema should exclude the partition keys + if (syncConfig.partitionFields.contains(key)) { + schemaPartitionKeys.add(column); + } else { + schemaWithoutPartitionKeys.add(column); + } + } + + StorageDescriptor storageDescriptor = new StorageDescriptor(); + serdeProperties.put("serialization.format", "1"); + storageDescriptor + .withSerdeInfo(new SerDeInfo().withSerializationLibrary(serdeClass).withParameters(serdeProperties)) + .withLocation(s3aToS3(syncConfig.basePath)) + .withInputFormat(inputFormatClass) + .withOutputFormat(outputFormatClass) + .withColumns(schemaWithoutPartitionKeys); + + TableInput tableInput = new TableInput() + .withName(tableName) + .withTableType(TableType.EXTERNAL_TABLE.toString()) + .withParameters(params) + .withPartitionKeys(schemaPartitionKeys) + .withStorageDescriptor(storageDescriptor) + .withLastAccessTime(new Date(System.currentTimeMillis())) + .withLastAnalyzedTime(new Date(System.currentTimeMillis())); + request.withDatabaseName(databaseName) + .withTableInput(tableInput); + + CreateTableResult result = awsGlue.createTable(request); + LOG.info("Created table " + tableId(databaseName, tableName) + " : " + result); + } catch (AlreadyExistsException e) { + LOG.warn("Table " + tableId(databaseName, tableName) + " already exists.", e); + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to create " + tableId(databaseName, tableName), e); + } + } + + @Override + public Map<String, String> getTableSchema(String tableName) { + try { + // GlueMetastoreClient returns partition keys separate from Columns, hence get both and merge to + // get the Schema of the table. 
+ final long start = System.currentTimeMillis(); + Table table = getTable(awsGlue, databaseName, tableName); + Map<String, String> partitionKeysMap = + table.getPartitionKeys().stream().collect(Collectors.toMap(Column::getName, f -> f.getType().toUpperCase())); + + Map<String, String> columnsMap = + table.getStorageDescriptor().getColumns().stream().collect(Collectors.toMap(Column::getName, f -> f.getType().toUpperCase())); + + Map<String, String> schema = new HashMap<>(); + schema.putAll(columnsMap); + schema.putAll(partitionKeysMap); + final long end = System.currentTimeMillis(); + LOG.info(String.format("Time taken to getTableSchema: %s ms", (end - start))); + return schema; + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to get schema for table " + tableId(databaseName, tableName), e); + } + } + + @Override + public boolean doesTableExist(String tableName) { + return tableExists(tableName); + } + + @Override + public boolean tableExists(String tableName) { + GetTableRequest request = new GetTableRequest() + .withDatabaseName(databaseName) + .withName(tableName); + try { + return Objects.nonNull(awsGlue.getTable(request).getTable()); + } catch (EntityNotFoundException e) { + LOG.info("Table not found: " + tableId(databaseName, tableName), e); + return false; + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to get table: " + tableId(databaseName, tableName), e); + } + } + + @Override + public boolean databaseExists(String databaseName) { + GetDatabaseRequest request = new GetDatabaseRequest(); + request.setName(databaseName); + try { + return Objects.nonNull(awsGlue.getDatabase(request).getDatabase()); + } catch (EntityNotFoundException e) { + LOG.info("Database not found: " + databaseName, e); + return false; + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to check if database exists " + databaseName, e); + } + } + + @Override + public void createDatabase(String databaseName) { + if (databaseExists(databaseName)) { + return; + } + CreateDatabaseRequest request = new CreateDatabaseRequest(); + request.setDatabaseInput(new DatabaseInput() + .withName(databaseName) + .withDescription("automatically created by hudi") + .withParameters(null) + .withLocationUri(null)); + try { + CreateDatabaseResult result = awsGlue.createDatabase(request); + LOG.info("Successfully created database in AWS Glue: " + result.toString()); + } catch (AlreadyExistsException e) { + LOG.warn("AWS Glue Database " + databaseName + " already exists", e); + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to create database " + databaseName, e); + } + } + + @Override + public Option<String> getLastCommitTimeSynced(String tableName) { + try { + Table table = getTable(awsGlue, databaseName, tableName); + return Option.of(table.getParameters().getOrDefault(HOODIE_LAST_COMMIT_TIME_SYNC, null)); + } catch (Exception e) { + throw new HoodieGlueSyncException("Fail to get last sync commit time for " + tableId(databaseName, tableName), e); + } + } + + @Override + public void close() { + awsGlue.shutdown(); + } + + @Override + public void updateLastCommitTimeSynced(String tableName) { Review comment: A general question about updating HOODIE_LAST_COMMIT_TIME_SYNC. I see in HiveSyncTool, we call hoodieHiveClient.updateLastCommitTimeSynced(tableName) in the end. But updating partitions and updating the last commit time synced is not atomic. 
   And within updateLastCommitTimeSynced, we get the latest commit time from the active timeline and record it as the lastCommitTimeSynced. So, in case of multi-writers, there is a chance that the timeline has moved between updating the partitions and calling updateLastCommitTimeSynced, i.e. we could record a commit as synced that was never actually synced. Maybe we should file a JIRA and follow up on this logic (see the sketch at the end of this message for one possible ordering).

##########
File path: hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogClient.java
##########

@@ -0,0 +1,483 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.aws.sync;
+
+import org.apache.hudi.common.fs.FSUtils;
+import org.apache.hudi.common.util.CollectionUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.hive.AbstractHiveSyncHoodieClient;
+import org.apache.hudi.hive.HiveSyncConfig;
+import org.apache.hudi.sync.common.model.Partition;
+
+import com.amazonaws.services.glue.AWSGlue;
+import com.amazonaws.services.glue.AWSGlueClientBuilder;
+import com.amazonaws.services.glue.model.AlreadyExistsException;
+import com.amazonaws.services.glue.model.BatchCreatePartitionRequest;
+import com.amazonaws.services.glue.model.BatchCreatePartitionResult;
+import com.amazonaws.services.glue.model.BatchUpdatePartitionRequest;
+import com.amazonaws.services.glue.model.BatchUpdatePartitionRequestEntry;
+import com.amazonaws.services.glue.model.BatchUpdatePartitionResult;
+import com.amazonaws.services.glue.model.Column;
+import com.amazonaws.services.glue.model.CreateDatabaseRequest;
+import com.amazonaws.services.glue.model.CreateDatabaseResult;
+import com.amazonaws.services.glue.model.CreateTableRequest;
+import com.amazonaws.services.glue.model.CreateTableResult;
+import com.amazonaws.services.glue.model.DatabaseInput;
+import com.amazonaws.services.glue.model.EntityNotFoundException;
+import com.amazonaws.services.glue.model.GetDatabaseRequest;
+import com.amazonaws.services.glue.model.GetPartitionsRequest;
+import com.amazonaws.services.glue.model.GetPartitionsResult;
+import com.amazonaws.services.glue.model.GetTableRequest;
+import com.amazonaws.services.glue.model.PartitionInput;
+import com.amazonaws.services.glue.model.SerDeInfo;
+import com.amazonaws.services.glue.model.StorageDescriptor;
+import com.amazonaws.services.glue.model.Table;
+import com.amazonaws.services.glue.model.TableInput;
+import com.amazonaws.services.glue.model.UpdateTableRequest;
+import org.apache.avro.Schema;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.parquet.schema.MessageType;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import static org.apache.hudi.aws.utils.S3Utils.s3aToS3;
+import static org.apache.hudi.common.util.MapUtils.nonEmpty;
+import static org.apache.hudi.hive.util.HiveSchemaUtil.getPartitionKeyType;
+import static org.apache.hudi.hive.util.HiveSchemaUtil.parquetSchemaToMapSchema;
+import static org.apache.hudi.sync.common.util.TableUtils.tableId;
+
+/**
+ * This class implements all the AWS APIs to enable syncing of a Hudi Table with the
+ * AWS Glue Data Catalog (https://docs.aws.amazon.com/glue/latest/dg/populate-data-catalog.html).
+ */
+public class AWSGlueCatalogClient extends AbstractHiveSyncHoodieClient {
+
+  private static final Logger LOG = LogManager.getLogger(AWSGlueCatalogClient.class);
+  private static final int MAX_PARTITIONS_PER_REQUEST = 100;
+  private static final int BATCH_REQUEST_SLEEP_SECONDS = 1;
+  private final AWSGlue awsGlue;
+  private final String databaseName;
+
+  public AWSGlueCatalogClient(HiveSyncConfig syncConfig, Configuration hadoopConf, FileSystem fs) {
+    super(syncConfig, hadoopConf, fs);
+    this.awsGlue = AWSGlueClientBuilder.standard().build();
+    this.databaseName = syncConfig.databaseName;
+  }
+
+  @Override
+  public List<Partition> getAllPartitions(String tableName) {
+    try {
+      GetPartitionsRequest request = new GetPartitionsRequest();
+      request.withDatabaseName(databaseName).withTableName(tableName);
+      GetPartitionsResult result = awsGlue.getPartitions(request);
+      return result.getPartitions()
+          .stream()
+          .map(p -> new Partition(p.getValues(), p.getStorageDescriptor().getLocation()))
+          .collect(Collectors.toList());
+    } catch (Exception e) {
+      throw new HoodieGlueSyncException("Failed to get all partitions for table " + tableId(databaseName, tableName), e);
+    }
+  }
+
+  @Override
+  public void addPartitionsToTable(String tableName, List<String> partitionsToAdd) {
+    if (partitionsToAdd.isEmpty()) {
+      LOG.info("No partitions to add for " + tableId(databaseName, tableName));
+      return;
+    }
+    LOG.info("Adding " + partitionsToAdd.size() + " partition(s) in table " + tableId(databaseName, tableName));
+    try {
+      Table table = getTable(awsGlue, databaseName, tableName);
+      StorageDescriptor sd = table.getStorageDescriptor();
+      List<PartitionInput> partitionInputs = partitionsToAdd.stream().map(partition -> {
+        StorageDescriptor partitionSd = sd.clone();
+        String fullPartitionPath = FSUtils.getPartitionPath(syncConfig.basePath, partition).toString();
+        List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition);
+        partitionSd.setLocation(fullPartitionPath);
+        return new PartitionInput().withValues(partitionValues).withStorageDescriptor(partitionSd);
+      }).collect(Collectors.toList());
+
+      for (List<PartitionInput> batch : CollectionUtils.batches(partitionInputs, MAX_PARTITIONS_PER_REQUEST)) {
+        BatchCreatePartitionRequest request = new BatchCreatePartitionRequest();
+        request.withDatabaseName(databaseName).withTableName(tableName).withPartitionInputList(batch);
+
+        BatchCreatePartitionResult result = awsGlue.batchCreatePartition(request);
+        if (CollectionUtils.nonEmpty(result.getErrors())) {
+          throw new HoodieGlueSyncException("Fail to add partitions to " + tableId(databaseName, tableName)
+              + " with error(s): " + result.getErrors());
+        }
+        // the constant is in seconds; a bare Thread.sleep(BATCH_REQUEST_SLEEP_SECONDS) would sleep only 1 ms
+        TimeUnit.SECONDS.sleep(BATCH_REQUEST_SLEEP_SECONDS);
+      }
+    } catch (Exception e) {
+      throw new HoodieGlueSyncException("Fail to add partitions to " + tableId(databaseName, tableName), e);
+    }
+  }
+
+  @Override
+  public void updatePartitionsToTable(String tableName, List<String> changedPartitions) {
+    if (changedPartitions.isEmpty()) {
+      LOG.info("No partitions to change for " + tableName);
+      return;
+    }
+    LOG.info("Updating " + changedPartitions.size() + " partition(s) in table " + tableId(databaseName, tableName));
+    try {
+      Table table = getTable(awsGlue, databaseName, tableName);
+      StorageDescriptor sd = table.getStorageDescriptor();
+      List<BatchUpdatePartitionRequestEntry> updatePartitionEntries = changedPartitions.stream().map(partition -> {
+        StorageDescriptor partitionSd = sd.clone();
+        String fullPartitionPath = FSUtils.getPartitionPath(syncConfig.basePath, partition).toString();
+        List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition);
+        // set the location on the cloned descriptor, not on the shared template sd
+        partitionSd.setLocation(fullPartitionPath);
+        PartitionInput partitionInput = new PartitionInput().withValues(partitionValues).withStorageDescriptor(partitionSd);
+        return new BatchUpdatePartitionRequestEntry().withPartitionInput(partitionInput).withPartitionValueList(partitionValues);
+      }).collect(Collectors.toList());
+
+      for (List<BatchUpdatePartitionRequestEntry> batch : CollectionUtils.batches(updatePartitionEntries, MAX_PARTITIONS_PER_REQUEST)) {
+        BatchUpdatePartitionRequest request = new BatchUpdatePartitionRequest();
+        request.withDatabaseName(databaseName).withTableName(tableName).withEntries(batch);
+
+        BatchUpdatePartitionResult result = awsGlue.batchUpdatePartition(request);
+        if (CollectionUtils.nonEmpty(result.getErrors())) {
+          throw new HoodieGlueSyncException("Fail to update partitions to " + tableId(databaseName, tableName)
+              + " with error(s): " + result.getErrors());
+        }
+        TimeUnit.SECONDS.sleep(BATCH_REQUEST_SLEEP_SECONDS);
+      }
+    } catch (Exception e) {
+      throw new HoodieGlueSyncException("Fail to update partitions to " + tableId(databaseName, tableName), e);
+    }
+  }
+
+  @Override
+  public void dropPartitionsToTable(String tableName, List<String> partitionsToDrop) {
+    throw new UnsupportedOperationException("dropPartitionsToTable is not supported yet.");
+  }
+
+  /**
+   * Update properties on an existing table.
+   */
+  @Override
+  public void updateTableProperties(String tableName, Map<String, String> tableProperties) {
+    if (!nonEmpty(tableProperties)) {
+      // nothing to update
+      return;
+    }
+    try {
+      updateTableParameters(awsGlue, databaseName, tableName, tableProperties, true);
+    } catch (Exception e) {
+      throw new HoodieGlueSyncException("Fail to update properties for table " + tableId(databaseName, tableName), e);
+    }
+  }
+
+  @Override
+  public void updateTableDefinition(String tableName, MessageType newSchema) {
+    // TODO: cascade is set in Hive meta sync, but we need to investigate how to configure it for Glue
+    boolean cascade = syncConfig.partitionFields.size() > 0;
+    try {
+      Table table = getTable(awsGlue, databaseName, tableName);
+      Map<String, String> newSchemaMap = parquetSchemaToMapSchema(newSchema, syncConfig.supportTimestamp, false);
+      List<Column> newColumns = newSchemaMap.keySet().stream().map(key -> {
+        String keyType = getPartitionKeyType(newSchemaMap, key);
+        return new Column().withName(key).withType(keyType.toLowerCase()).withComment("");
+      }).collect(Collectors.toList());
+      StorageDescriptor sd = table.getStorageDescriptor();
+      sd.setColumns(newColumns);
+
+      final Date now = new Date();
+      TableInput updatedTableInput = new TableInput()
+          .withName(tableName)
+          .withTableType(table.getTableType())
+          .withParameters(table.getParameters())
+          .withPartitionKeys(table.getPartitionKeys())
+          .withStorageDescriptor(sd)
+          .withLastAccessTime(now)
+          .withLastAnalyzedTime(now);
+
+      UpdateTableRequest request = new UpdateTableRequest()
+          .withDatabaseName(databaseName)
+          .withTableInput(updatedTableInput);
+
+      awsGlue.updateTable(request);
+    } catch (Exception e) {
+      throw new HoodieGlueSyncException("Fail to update definition for table " + tableId(databaseName, tableName), e);
+    }
+  }
+
+  @Override
+  public List<FieldSchema> getTableCommentUsingMetastoreClient(String tableName) {
+    // no op; unsupported
+    return Collections.emptyList();
+  }
+
+  @Override
+  public void updateTableComments(String tableName, List<FieldSchema> oldSchema, List<Schema.Field> newSchema) {
+    // no op; unsupported
+  }
+
+  @Override
+  public void updateTableComments(String tableName, List<FieldSchema> oldSchema, Map<String, String> newComments) {
+    // no op; unsupported
+  }
+
+  @Override
+  public void createTable(String tableName,
+                          MessageType storageSchema,
+                          String inputFormatClass,
+                          String outputFormatClass,
+                          String serdeClass,
+                          Map<String, String> serdeProperties,
+                          Map<String, String> tableProperties) {
+    if (tableExists(tableName)) {
+      return;
+    }
+    CreateTableRequest request = new CreateTableRequest();
+    Map<String, String> params = new HashMap<>();
+    if (!syncConfig.createManagedTable) {
+      params.put("EXTERNAL", "TRUE");
+    }
+    params.putAll(tableProperties);
+
+    try {
+      Map<String, String> mapSchema = parquetSchemaToMapSchema(storageSchema, syncConfig.supportTimestamp, false);
+
+      List<Column> schemaPartitionKeys = new ArrayList<>();
+      List<Column> schemaWithoutPartitionKeys = new ArrayList<>();
+      for (String key : mapSchema.keySet()) {
+        String keyType = getPartitionKeyType(mapSchema, key);
+        Column column = new Column().withName(key).withType(keyType.toLowerCase()).withComment("");
+        // In Glue, the full schema should exclude the partition keys
+        if (syncConfig.partitionFields.contains(key)) {
+          schemaPartitionKeys.add(column);
+        } else {
+          schemaWithoutPartitionKeys.add(column);
+        }
+      }
+
+      StorageDescriptor storageDescriptor = new StorageDescriptor();
+      serdeProperties.put("serialization.format", "1");
+      storageDescriptor
+          .withSerdeInfo(new SerDeInfo().withSerializationLibrary(serdeClass).withParameters(serdeProperties))
+          .withLocation(s3aToS3(syncConfig.basePath))
+          .withInputFormat(inputFormatClass)
+          .withOutputFormat(outputFormatClass)
+          .withColumns(schemaWithoutPartitionKeys);
+
+      TableInput tableInput = new TableInput()
+          .withName(tableName)
+          .withTableType(TableType.EXTERNAL_TABLE.toString())
+          .withParameters(params)
+          .withPartitionKeys(schemaPartitionKeys)
+          .withStorageDescriptor(storageDescriptor)
+          .withLastAccessTime(new Date(System.currentTimeMillis()))
+          .withLastAnalyzedTime(new Date(System.currentTimeMillis()));
+      request.withDatabaseName(databaseName)
+          .withTableInput(tableInput);
+
+      CreateTableResult result = awsGlue.createTable(request);
+      LOG.info("Created table " + tableId(databaseName, tableName) + " : " + result);
+    } catch (AlreadyExistsException e) {
+      LOG.warn("Table " + tableId(databaseName, tableName) + " already exists.", e);
+    } catch (Exception e) {
+      throw new HoodieGlueSyncException("Fail to create " + tableId(databaseName, tableName), e);
+    }
+  }
+
+  @Override
+  public Map<String, String> getTableSchema(String tableName) {
+    try {
+      // GlueMetastoreClient returns partition keys separate from Columns, hence get both and merge to
+      // get the Schema of the table.
+      final long start = System.currentTimeMillis();
+      Table table = getTable(awsGlue, databaseName, tableName);
+      Map<String, String> partitionKeysMap =
+          table.getPartitionKeys().stream().collect(Collectors.toMap(Column::getName, f -> f.getType().toUpperCase()));
+
+      Map<String, String> columnsMap =
+          table.getStorageDescriptor().getColumns().stream().collect(Collectors.toMap(Column::getName, f -> f.getType().toUpperCase()));
+
+      Map<String, String> schema = new HashMap<>();
+      schema.putAll(columnsMap);
+      schema.putAll(partitionKeysMap);
+      final long end = System.currentTimeMillis();
+      LOG.info(String.format("Time taken to getTableSchema: %s ms", (end - start)));
+      return schema;
+    } catch (Exception e) {
+      throw new HoodieGlueSyncException("Fail to get schema for table " + tableId(databaseName, tableName), e);
+    }
+  }
+
+  @Override
+  public boolean doesTableExist(String tableName) {
+    return tableExists(tableName);
+  }
+
+  @Override
+  public boolean tableExists(String tableName) {
+    GetTableRequest request = new GetTableRequest()
+        .withDatabaseName(databaseName)
+        .withName(tableName);
+    try {
+      return Objects.nonNull(awsGlue.getTable(request).getTable());
+    } catch (EntityNotFoundException e) {
+      LOG.info("Table not found: " + tableId(databaseName, tableName), e);
+      return false;
+    } catch (Exception e) {
+      throw new HoodieGlueSyncException("Fail to get table: " + tableId(databaseName, tableName), e);
+    }
+  }
+
+  @Override
+  public boolean databaseExists(String databaseName) {
+    GetDatabaseRequest request = new GetDatabaseRequest();
+    request.setName(databaseName);
+    try {
+      return Objects.nonNull(awsGlue.getDatabase(request).getDatabase());
+    } catch (EntityNotFoundException e) {
+      LOG.info("Database not found: " + databaseName, e);

Review comment:
   Should this be logged at error instead of info?
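A minimal sketch of the ordering suggested in the atomicity comment above, assuming the fix is to pin the instant to record before any metastore work starts. HoodieTimeline and Option are real Hudi types; SyncRaceSketch, syncPartitions, and pinLastCommitTimeSynced are hypothetical names used only for illustration, not APIs from this PR.

    import org.apache.hudi.common.table.timeline.HoodieTimeline;
    import org.apache.hudi.common.util.Option;

    public class SyncRaceSketch {

      // Snapshot the timeline *before* syncing, so a commit from a concurrent
      // writer that lands mid-sync is not accidentally marked as synced.
      void syncAndPinLastCommit(String tableName, HoodieTimeline activeTimeline) {
        Option<String> instantToRecord =
            activeTimeline.lastInstant().map(instant -> instant.getTimestamp());
        syncPartitions(tableName); // the timeline may move during this call
        instantToRecord.ifPresent(ts -> pinLastCommitTimeSynced(tableName, ts));
      }

      void syncPartitions(String tableName) {
        // hypothetical: add/update/drop partitions in the metastore
      }

      void pinLastCommitTimeSynced(String tableName, String instantTime) {
        // hypothetical: write HOODIE_LAST_COMMIT_TIME_SYNC = instantTime as a table property
      }
    }

This only illustrates the ordering; whatever fix lands in the follow-up JIRA may look different.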

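Both addPartitionsToTable and updatePartitionsToTable in the diff above chunk their Glue calls into groups of MAX_PARTITIONS_PER_REQUEST (100 appears to match Glue's per-request partition cap) and pause between requests. A minimal, self-contained sketch of that throttled-batching pattern, with hypothetical stand-ins: batches mirrors what CollectionUtils.batches does, and submitBatch stands in for awsGlue.batchCreatePartition.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.TimeUnit;

    public class ThrottledBatchSketch {

      static final int MAX_PER_REQUEST = 100; // mirrors MAX_PARTITIONS_PER_REQUEST above
      static final long SLEEP_SECONDS = 1;    // back off between calls to stay under rate limits

      // Split the inputs into chunks of at most `size` elements.
      static <T> List<List<T>> batches(List<T> items, int size) {
        List<List<T>> result = new ArrayList<>();
        for (int i = 0; i < items.size(); i += size) {
          result.add(items.subList(i, Math.min(i + size, items.size())));
        }
        return result;
      }

      static void submitAll(List<String> partitions) throws InterruptedException {
        for (List<String> batch : batches(partitions, MAX_PER_REQUEST)) {
          submitBatch(batch);
          // TimeUnit keeps the unit explicit; a bare Thread.sleep(1) would pause only 1 ms
          TimeUnit.SECONDS.sleep(SLEEP_SECONDS);
        }
      }

      static void submitBatch(List<String> batch) {
        // hypothetical stand-in for the Glue batch call
      }
    }

TimeUnit.SECONDS.sleep makes the sleep unit explicit, which is why the in-place fixes above prefer it over Thread.sleep with a seconds-named constant.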