This is an automated email from the ASF dual-hosted git repository.

liuxun pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/gravitino.git


The following commit(s) were added to refs/heads/main by this push:
     new 776144075 [#5517] feat(auth): Paimon catalog supports Ranger plugin (#5523)
776144075 is described below

commit 7761440756fed560d765d95af75ae503b1dff586
Author: roryqi <ror...@apache.org>
AuthorDate: Tue Nov 12 20:42:44 2024 +0800

    [#5517] feat(auth): Paimon catalog supports Ranger plugin (#5523)
    
    ### What changes were proposed in this pull request?
    
    The Paimon catalog now supports the Ranger plugin. Note that the Kyuubi
    authz plugin does not support updating or deleting Paimon tables.
    
    ### Why are the changes needed?
    
    Fix: #5517
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    Add IT.
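
    For reference, a Ranger-authorized Paimon catalog can be created through the
    Gravitino Java client roughly as in the sketch below. The server URI, metalake
    name, metastore/warehouse locations, and Ranger endpoint are hypothetical
    placeholders; the property keys mirror the ones exercised by the new
    RangerPaimonE2EIT.

    ```java
    // Sketch only: the names and endpoints below are placeholders, not part of this PR.
    import com.google.common.collect.ImmutableMap;
    import java.util.Map;
    import org.apache.gravitino.Catalog;
    import org.apache.gravitino.client.GravitinoClient;

    public class CreatePaimonCatalogExample {
      public static void main(String[] args) {
        GravitinoClient client =
            GravitinoClient.builder("http://localhost:8090") // placeholder server URI
                .withMetalake("metalake_demo")               // placeholder metalake
                .build();

        Map<String, String> properties =
            ImmutableMap.<String, String>builder()
                .put("uri", "thrift://hive-metastore:9083")  // placeholder HMS URI
                .put("catalog-backend", "hive")
                .put("warehouse", "hdfs://namenode:9000/user/hive/warehouse") // placeholder
                .put("authorization-provider", "ranger")
                .put("authorization.ranger.admin.url", "http://ranger-admin:6080")
                .put("authorization.ranger.auth.type", "simple")
                .put("authorization.ranger.username", "Jack")
                .put("authorization.ranger.password", "PWD123")
                .put("authorization.ranger.service.name", "paimonRepo") // placeholder repo
                .build();

        // "lakehouse-paimon" is the provider string that now routes to
        // RangerAuthorizationHadoopSQLPlugin.
        Catalog catalog =
            client.createCatalog(
                "paimon_catalog",
                Catalog.Type.RELATIONAL,
                "lakehouse-paimon",
                "Paimon catalog with Ranger pushdown",
                properties);
        System.out.println("Created catalog: " + catalog.name());
      }
    }
    ```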
---
 .../authorization-ranger/build.gradle.kts          |  6 +-
 .../authorization/ranger/RangerAuthorization.java  |  1 +
 .../ranger/RangerAuthorizationHadoopSQLPlugin.java | 20 ++---
 .../authorization/ranger/RangerPrivileges.java     |  6 +-
 .../ranger/integration/test/RangerBaseE2EIT.java   | 34 ++++-----
 .../ranger/integration/test/RangerHiveE2EIT.java   |  2 +-
 .../ranger/integration/test/RangerHiveIT.java      |  4 +-
 .../ranger/integration/test/RangerITEnv.java       |  2 +-
 .../integration/test/RangerIcebergE2EIT.java       |  4 +-
 ...gerIcebergE2EIT.java => RangerPaimonE2EIT.java} | 87 +++++++++++-----------
 docs/security/authorization-pushdown.md            | 40 ++--------
 gradle/libs.versions.toml                          |  1 +
 12 files changed, 92 insertions(+), 115 deletions(-)

diff --git a/authorizations/authorization-ranger/build.gradle.kts b/authorizations/authorization-ranger/build.gradle.kts
index 93d90cd4f..0eeb6c4fb 100644
--- a/authorizations/authorization-ranger/build.gradle.kts
+++ b/authorizations/authorization-ranger/build.gradle.kts
@@ -26,9 +26,10 @@ plugins {
 
 val scalaVersion: String = project.properties["scalaVersion"] as? String ?: extra["defaultScalaVersion"].toString()
 val sparkVersion: String = libs.versions.spark35.get()
-val kyuubiVersion: String = libs.versions.kyuubi4spark35.get()
+val kyuubiVersion: String = libs.versions.kyuubi4paimon.get()
 val sparkMajorVersion: String = sparkVersion.substringBeforeLast(".")
 val icebergVersion: String = libs.versions.iceberg4spark.get()
+val paimonVersion: String = libs.versions.paimon.get()
 
 dependencies {
   implementation(project(":api")) {
@@ -86,7 +87,7 @@ dependencies {
     exclude("io.dropwizard.metrics")
     exclude("org.rocksdb")
   }
-  testImplementation("org.apache.kyuubi:kyuubi-spark-authz_$scalaVersion:$kyuubiVersion") {
+  testImplementation("org.apache.kyuubi:kyuubi-spark-authz-shaded_$scalaVersion:$kyuubiVersion") {
     exclude("com.sun.jersey")
   }
   testImplementation(libs.hadoop3.client)
@@ -100,6 +101,7 @@ dependencies {
     exclude("io.netty")
   }
   testImplementation("org.apache.iceberg:iceberg-spark-runtime-${sparkMajorVersion}_$scalaVersion:$icebergVersion")
+  testImplementation("org.apache.paimon:paimon-spark-$sparkMajorVersion:$paimonVersion")
 }
 
 tasks {
diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorization.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorization.java
index 9f8b42b06..459b6b047 100644
--- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorization.java
+++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorization.java
@@ -34,6 +34,7 @@ public class RangerAuthorization extends BaseAuthorization<RangerAuthorization>
     switch (catalogProvider) {
       case "hive":
       case "lakehouse-iceberg":
+      case "lakehouse-paimon":
         return RangerAuthorizationHadoopSQLPlugin.getInstance(config);
       default:
         throw new IllegalArgumentException("Unknown catalog provider: " + catalogProvider);
diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java
index f75485aed..9ad2a8c1e 100644
--- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java
+++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java
@@ -35,7 +35,7 @@ import org.apache.gravitino.MetadataObject;
 import org.apache.gravitino.authorization.Privilege;
 import org.apache.gravitino.authorization.SecurableObject;
 import org.apache.gravitino.authorization.SecurableObjects;
-import org.apache.gravitino.authorization.ranger.RangerPrivileges.RangerHivePrivilege;
+import org.apache.gravitino.authorization.ranger.RangerPrivileges.RangerHadoopSQLPrivilege;
 import org.apache.gravitino.authorization.ranger.reference.RangerDefines.PolicyResource;
 import org.apache.gravitino.exceptions.AuthorizationPluginException;
 import org.slf4j.Logger;
@@ -96,26 +96,28 @@ public class RangerAuthorizationHadoopSQLPlugin extends RangerAuthorizationPlugi
   public Map<Privilege.Name, Set<RangerPrivilege>> privilegesMappingRule() {
     return ImmutableMap.of(
         Privilege.Name.CREATE_CATALOG,
-        ImmutableSet.of(RangerHivePrivilege.CREATE),
+        ImmutableSet.of(RangerHadoopSQLPrivilege.CREATE),
         Privilege.Name.USE_CATALOG,
-        ImmutableSet.of(RangerHivePrivilege.SELECT),
+        ImmutableSet.of(RangerHadoopSQLPrivilege.SELECT),
         Privilege.Name.CREATE_SCHEMA,
-        ImmutableSet.of(RangerHivePrivilege.CREATE),
+        ImmutableSet.of(RangerHadoopSQLPrivilege.CREATE),
         Privilege.Name.USE_SCHEMA,
-        ImmutableSet.of(RangerHivePrivilege.SELECT),
+        ImmutableSet.of(RangerHadoopSQLPrivilege.SELECT),
         Privilege.Name.CREATE_TABLE,
-        ImmutableSet.of(RangerHivePrivilege.CREATE),
+        ImmutableSet.of(RangerHadoopSQLPrivilege.CREATE),
         Privilege.Name.MODIFY_TABLE,
         ImmutableSet.of(
-            RangerHivePrivilege.UPDATE, RangerHivePrivilege.ALTER, RangerHivePrivilege.WRITE),
+            RangerHadoopSQLPrivilege.UPDATE,
+            RangerHadoopSQLPrivilege.ALTER,
+            RangerHadoopSQLPrivilege.WRITE),
         Privilege.Name.SELECT_TABLE,
-        ImmutableSet.of(RangerHivePrivilege.READ, RangerHivePrivilege.SELECT));
+        ImmutableSet.of(RangerHadoopSQLPrivilege.READ, RangerHadoopSQLPrivilege.SELECT));
   }
 
   @Override
   /** Set the default owner rule. */
   public Set<RangerPrivilege> ownerMappingRule() {
-    return ImmutableSet.of(RangerHivePrivilege.ALL);
+    return ImmutableSet.of(RangerHadoopSQLPrivilege.ALL);
   }
 
   @Override
diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerPrivileges.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerPrivileges.java
index c54778167..e47b46efc 100644
--- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerPrivileges.java
+++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerPrivileges.java
@@ -25,7 +25,7 @@ import org.apache.gravitino.authorization.Privilege;
 
 public class RangerPrivileges {
   /** Ranger Hive privileges enumeration. */
-  public enum RangerHivePrivilege implements RangerPrivilege {
+  public enum RangerHadoopSQLPrivilege implements RangerPrivilege {
     ALL("all"),
     SELECT("select"),
     UPDATE("update"),
@@ -41,7 +41,7 @@ public class RangerPrivileges {
 
     private final String name; // Access a type in the Ranger policy item
 
-    RangerHivePrivilege(String name) {
+    RangerHadoopSQLPrivilege(String name) {
       this.name = name;
     }
 
@@ -117,7 +117,7 @@ public class RangerPrivileges {
 
   static List<Class<? extends Enum<? extends RangerPrivilege>>> allRangerPrivileges =
       Lists.newArrayList(
-          RangerPrivileges.RangerHivePrivilege.class, RangerPrivileges.RangerHdfsPrivilege.class);
+          RangerHadoopSQLPrivilege.class, RangerPrivileges.RangerHdfsPrivilege.class);
 
   public static RangerPrivilege valueOf(String name) {
     Preconditions.checkArgument(name != null, "Privilege name string cannot be null!");
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java
index 8034e8d7a..95dc4f936 100644
--- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java
+++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java
@@ -193,7 +193,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
 
   protected abstract void useCatalog() throws InterruptedException;
 
-  protected abstract void checkHaveNoPrivileges();
+  protected abstract void checkWithoutPrivileges();
 
   protected abstract void testAlterTable();
 
@@ -269,7 +269,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
         AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
     metalake.deleteRole(createTableRole);
     metalake.deleteRole(createSchemaRole);
@@ -323,10 +323,10 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     // case 7: If we don't have the role, we can't insert and select from data.
     metalake.deleteRole(readWriteRole);
     waitForUpdatingPolicies();
-    checkHaveNoPrivileges();
+    checkWithoutPrivileges();
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
   }
 
@@ -387,10 +387,10 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     // case 7: If we don't have the role, we can't insert and select from data.
     metalake.deleteRole(roleName);
     waitForUpdatingPolicies();
-    checkHaveNoPrivileges();
+    checkWithoutPrivileges();
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
   }
 
@@ -441,10 +441,10 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     // case 7: If we don't have the role, we can't insert and select from data.
     metalake.deleteRole(readOnlyRole);
     waitForUpdatingPolicies();
-    checkHaveNoPrivileges();
+    checkWithoutPrivileges();
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
   }
 
@@ -496,10 +496,10 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     // case 7: If we don't have the role, we can't insert and select from data.
     metalake.deleteRole(writeOnlyRole);
     waitForUpdatingPolicies();
-    checkHaveNoPrivileges();
+    checkWithoutPrivileges();
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
   }
 
@@ -547,7 +547,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     sparkSession.sql(SQL_CREATE_TABLE);
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
     metalake.deleteRole(roleName);
   }
@@ -690,7 +690,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     sparkSession.sql(SQL_RENAME_BACK_TABLE);
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
     metalake.deleteRole(roleName);
   }
@@ -739,7 +739,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     sparkSession.sql(SQL_INSERT_TABLE);
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
     metalake.deleteRole(roleName);
   }
@@ -774,7 +774,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     metalake.deleteRole(helperRole);
     waitForUpdatingPolicies();
 
-    checkHaveNoPrivileges();
+    checkWithoutPrivileges();
 
     // case 2. user is the  table owner
     MetadataObject tableObject =
@@ -787,7 +787,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     checkTableAllPrivilegesExceptForCreating();
 
     // Delete Gravitino's meta data
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     waitForUpdatingPolicies();
 
     // Fail to create the table
@@ -854,7 +854,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
     sparkSession.sql(SQL_DROP_SCHEMA);
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
   }
 
@@ -915,7 +915,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
         1, rows2.stream().filter(row -> row.getString(0).equals(schemaName)).count());
 
     // Clean up
-    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName));
     catalog.asSchemas().dropSchema(schemaName, false);
     metalake.revokeRolesFromUser(Lists.newArrayList(roleName), userName1);
     metalake.deleteRole(roleName);
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
index 7e3096a61..cb41e7921 100644
--- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
+++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
@@ -120,7 +120,7 @@ public class RangerHiveE2EIT extends RangerBaseE2EIT {
   }
 
   @Override
-  protected void checkHaveNoPrivileges() {
+  protected void checkWithoutPrivileges() {
     Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
     Assertions.assertThrows(
         AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java
index a72503c2f..da43daca7 100644
--- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java
+++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java
@@ -348,7 +348,7 @@ public class RangerHiveIT {
             RangerMetadataObject.Type.TABLE,
             ImmutableSet.of(
                 new RangerPrivileges.RangerHivePrivilegeImpl(
-                    RangerPrivileges.RangerHivePrivilege.ALL, Privilege.Condition.ALLOW)));
+                    RangerPrivileges.RangerHadoopSQLPrivilege.ALL, Privilege.Condition.ALLOW)));
     Assertions.assertNull(rangerHelper.findManagedPolicy(rangerSecurableObject));
 
     // Add a policy for `db3.tab1`
@@ -398,7 +398,7 @@ public class RangerHiveIT {
     policyItem.setAccesses(
         Arrays.asList(
             new RangerPolicy.RangerPolicyItemAccess(
-                RangerPrivileges.RangerHivePrivilege.SELECT.toString())));
+                RangerPrivileges.RangerHadoopSQLPrivilege.SELECT.toString())));
     RangerITEnv.updateOrCreateRangerPolicy(
         RangerDefines.SERVICE_TYPE_HIVE,
         RangerITEnv.RANGER_HIVE_REPO_NAME,
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java
index 31ae3974d..13202add7 100644
--- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java
+++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java
@@ -212,7 +212,7 @@ public class RangerITEnv {
     policyItem.setAccesses(
         Arrays.asList(
             new RangerPolicy.RangerPolicyItemAccess(
-                RangerPrivileges.RangerHivePrivilege.SELECT.toString())));
+                RangerPrivileges.RangerHadoopSQLPrivilege.SELECT.toString())));
     updateOrCreateRangerPolicy(
         RANGER_HIVE_TYPE,
         RANGER_HIVE_REPO_NAME,
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
index 648a9c4d7..7b45eda7a 100644
--- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
+++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
@@ -90,7 +90,7 @@ public class RangerIcebergE2EIT extends RangerBaseE2EIT {
     sparkSession =
         SparkSession.builder()
             .master("local[1]")
-            .appName("Ranger Hive E2E integration test")
+            .appName("Ranger Iceberg E2E integration test")
             .config("spark.sql.catalog.iceberg", 
"org.apache.iceberg.spark.SparkCatalog")
             .config("spark.sql.catalog.iceberg.type", "hive")
             .config("spark.sql.catalog.iceberg.uri", HIVE_METASTORE_URIS)
@@ -147,7 +147,7 @@ public class RangerIcebergE2EIT extends RangerBaseE2EIT {
   }
 
   @Override
-  protected void checkHaveNoPrivileges() {
+  protected void checkWithoutPrivileges() {
     Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
     Assertions.assertThrows(
         AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java
similarity index 81%
copy from authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
copy to authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java
index 648a9c4d7..7cb600b9d 100644
--- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
+++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java
@@ -20,7 +20,6 @@ package org.apache.gravitino.authorization.ranger.integration.test;
 
 import static org.apache.gravitino.Catalog.AUTHORIZATION_PROVIDER;
 import static org.apache.gravitino.authorization.ranger.integration.test.RangerITEnv.currentFunName;
-import static org.apache.gravitino.catalog.hive.HiveConstants.IMPERSONATION_ENABLE;
 import static org.apache.gravitino.connector.AuthorizationPropertiesMeta.RANGER_AUTH_TYPE;
 import static org.apache.gravitino.connector.AuthorizationPropertiesMeta.RANGER_PASSWORD;
 import static org.apache.gravitino.connector.AuthorizationPropertiesMeta.RANGER_SERVICE_NAME;
@@ -39,7 +38,6 @@ import org.apache.gravitino.auth.AuthenticatorType;
 import org.apache.gravitino.authorization.Privileges;
 import org.apache.gravitino.authorization.SecurableObject;
 import org.apache.gravitino.authorization.SecurableObjects;
-import org.apache.gravitino.catalog.lakehouse.iceberg.IcebergConstants;
 import org.apache.gravitino.connector.AuthorizationPropertiesMeta;
 import org.apache.gravitino.integration.test.container.HiveContainer;
 import org.apache.gravitino.integration.test.container.RangerContainer;
@@ -54,10 +52,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @Tag("gravitino-docker-test")
-public class RangerIcebergE2EIT extends RangerBaseE2EIT {
-  private static final Logger LOG = LoggerFactory.getLogger(RangerIcebergE2EIT.class);
-  private static final String SQL_USE_CATALOG = "USE iceberg";
-  private static final String provider = "lakehouse-iceberg";
+public class RangerPaimonE2EIT extends RangerBaseE2EIT {
+  private static final Logger LOG = LoggerFactory.getLogger(RangerPaimonE2EIT.class);
+
+  private static final String provider = "lakehouse-paimon";
+  private static final String SQL_USE_CATALOG = "USE paimon";
 
   @BeforeAll
   public void startIntegrationTest() throws Exception {
@@ -90,25 +89,29 @@ public class RangerIcebergE2EIT extends RangerBaseE2EIT {
     sparkSession =
         SparkSession.builder()
             .master("local[1]")
-            .appName("Ranger Hive E2E integration test")
-            .config("spark.sql.catalog.iceberg", 
"org.apache.iceberg.spark.SparkCatalog")
-            .config("spark.sql.catalog.iceberg.type", "hive")
-            .config("spark.sql.catalog.iceberg.uri", HIVE_METASTORE_URIS)
-            .config("spark.sql.catalog.iceberg.cache-enabled", "false")
+            .appName("Ranger Paimon E2E integration test")
+            .config("spark.sql.catalog.paimon", 
"org.apache.paimon.spark.SparkCatalog")
+            .config("spark.sql.catalog.paimon.metastore", "hive")
+            .config("spark.sql.catalog.paimon.uri", HIVE_METASTORE_URIS)
+            .config(
+                "spark.sql.catalog.paimon.warehouse",
+                String.format(
+                    "hdfs://%s:%d/user/hive/warehouse",
+                    
containerSuite.getHiveRangerContainer().getContainerIpAddress(),
+                    HiveContainer.HDFS_DEFAULTFS_PORT))
+            .config("spark.sql.catalog.paimon.cache-enabled", "false")
             .config(
                 "spark.sql.extensions",
                 
"org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension,"
-                    + 
"org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions")
+                    + 
"org.apache.paimon.spark.extensions.PaimonSparkSessionExtensions")
             .enableHiveSupport()
             .getOrCreate();
 
     createMetalake();
     createCatalog();
 
-    metalake.addUser(System.getenv(HADOOP_USER_NAME));
-
     RangerITEnv.cleanup();
-    waitForUpdatingPolicies();
+    metalake.addUser(System.getenv(HADOOP_USER_NAME));
   }
 
   @AfterAll
@@ -116,38 +119,53 @@ public class RangerIcebergE2EIT extends RangerBaseE2EIT {
     cleanIT();
   }
 
+  @Override
+  protected void useCatalog() throws InterruptedException {
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    String roleName = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName, Lists.newArrayList(Privileges.UseCatalog.allow()));
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+    waitForUpdatingPolicies();
+    sparkSession.sql(SQL_USE_CATALOG);
+    metalake.deleteRole(roleName);
+    waitForUpdatingPolicies();
+  }
+
   @Override
   protected void checkUpdateSQLWithReadWritePrivileges() {
-    sparkSession.sql(SQL_UPDATE_TABLE);
+    // The Kyuubi Paimon Ranger plugin doesn't support UPDATE yet.
   }
 
   @Override
   protected void checkUpdateSQLWithReadPrivileges() {
-    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE));
+    // The Kyuubi Paimon Ranger plugin doesn't support UPDATE yet.
   }
 
   @Override
   protected void checkUpdateSQLWithWritePrivileges() {
-    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE));
+    // The Kyuubi Paimon Ranger plugin doesn't support UPDATE yet.
   }
 
   @Override
   protected void checkDeleteSQLWithReadWritePrivileges() {
-    sparkSession.sql(SQL_DELETE_TABLE);
+    // The Kyuubi Paimon Ranger plugin doesn't support DELETE yet.
   }
 
   @Override
   protected void checkDeleteSQLWithReadPrivileges() {
-    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DELETE_TABLE));
+    // The Kyuubi Paimon Ranger plugin doesn't support DELETE yet.
   }
 
   @Override
   protected void checkDeleteSQLWithWritePrivileges() {
-    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DELETE_TABLE));
+    // The Kyuubi Paimon Ranger plugin doesn't support DELETE yet.
   }
 
   @Override
-  protected void checkHaveNoPrivileges() {
+  protected void checkWithoutPrivileges() {
     Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
     Assertions.assertThrows(
         AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
@@ -157,8 +175,6 @@ public class RangerIcebergE2EIT extends RangerBaseE2EIT {
     Assertions.assertThrows(
         AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA));
     Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_TABLE));
-    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DELETE_TABLE));
-    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE));
   }
 
   @Override
@@ -170,17 +186,15 @@ public class RangerIcebergE2EIT extends RangerBaseE2EIT {
   private static void createCatalog() {
     Map<String, String> properties =
         ImmutableMap.of(
-            IcebergConstants.URI,
+            "uri",
             HIVE_METASTORE_URIS,
-            IcebergConstants.CATALOG_BACKEND,
+            "catalog-backend",
             "hive",
-            IcebergConstants.WAREHOUSE,
+            "warehouse",
             String.format(
                 "hdfs://%s:%d/user/hive/warehouse",
                 containerSuite.getHiveRangerContainer().getContainerIpAddress(),
                 HiveContainer.HDFS_DEFAULTFS_PORT),
-            IMPERSONATION_ENABLE,
-            "true",
             AUTHORIZATION_PROVIDER,
             "ranger",
             RANGER_SERVICE_NAME,
@@ -199,21 +213,6 @@ public class RangerIcebergE2EIT extends RangerBaseE2EIT {
     LOG.info("Catalog created: {}", catalog);
   }
 
-  @Override
-  protected void useCatalog() throws InterruptedException {
-    String userName1 = System.getenv(HADOOP_USER_NAME);
-    String roleName = currentFunName();
-    SecurableObject securableObject =
-        SecurableObjects.ofMetalake(
-            metalakeName, Lists.newArrayList(Privileges.UseCatalog.allow()));
-    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
-    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
-    waitForUpdatingPolicies();
-    sparkSession.sql(SQL_USE_CATALOG);
-    metalake.deleteRole(roleName);
-    waitForUpdatingPolicies();
-  }
-
   protected void checkTableAllPrivilegesExceptForCreating() {
     // - a. Succeed to insert data into the table
     sparkSession.sql(SQL_INSERT_TABLE);
diff --git a/docs/security/authorization-pushdown.md b/docs/security/authorization-pushdown.md
index 2d93305f5..dbcaa8d80 100644
--- a/docs/security/authorization-pushdown.md
+++ b/docs/security/authorization-pushdown.md
@@ -13,9 +13,9 @@ Gravitino offers a set of authorization frameworks that integrate with various u
 Gravitino manages different data sources through Catalogs, and when a user performs an authorization operation on data within a Catalog, Gravitino invokes the Authorization Plugin module for that Catalog.
 This module translates Gravitino's authorization model into the permission rules of the underlying data source. The permissions are then enforced by the underlying permission system via the respective client, such as JDBC or the Apache Ranger client.
 
-### Authorization Hive with Ranger properties
+### Ranger Hadoop SQL Plugin
 
-In order to use the Authorization Ranger Hive Plugin, you need to configure the following properties and [Apache Hive catalog properties](../apache-hive-catalog.md#catalog-properties):
+In order to use the Ranger Hadoop SQL Plugin, you need to configure the following properties:
 
 | Property Name                       | Description                                                                                                                                          | Default Value | Required | Since Version    |
 |-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------|
@@ -28,7 +28,7 @@ In order to use the Authorization Ranger Hive Plugin, you need to configure the
 
 Once you have used the correct configuration, you can perform authorization operations by calling Gravitino [authorization RESTful API](https://gravitino.apache.org/docs/latest/api/rest/grant-roles-to-a-user).
 
-#### Example of using the Authorization Ranger Hive Plugin
+#### Example of using the Ranger Hadoop SQL Plugin
 
 Suppose you have an Apache Hive service in your datacenter and have created a `hiveRepo` in Apache Ranger to manage its permissions.
 The Ranger service is accessible at `172.0.0.100:6080`, with the username `Jack` and the password `PWD123`.
@@ -43,36 +43,8 @@ authorization.ranger.password=PWD123
 authorization.ranger.service.name=hiveRepo
 ```
 
-### Authorization Iceberg with Ranger properties
-
-In order to use the Authorization Ranger Iceberg Plugin, you need to configure the following properties and [Lakehouse_Iceberg catalog properties](../lakehouse-iceberg-catalog.md#catalog-properties):
-
-| Property Name                       | Description                                                                                                                                          | Default Value | Required | Since Version    |
-|-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------|
-| `authorization-provider`            | Providers to use to implement authorization plugin such as `ranger`.                                                                                 | (none)        | No       | 0.8.0-incubating |
-| `authorization.ranger.admin.url`    | The Apache Ranger web URIs.                                                                                                                          | (none)        | No       | 0.8.0-incubating |
-| `authorization.ranger.auth.type`    | The Apache Ranger authentication type `simple` or `kerberos`.                                                                                        | `simple`      | No       | 0.8.0-incubating |
-| `authorization.ranger.username`     | The Apache Ranger admin web login username (auth type=simple), or kerberos principal(auth type=kerberos), Need have Ranger administrator permission. | (none)        | No       | 0.8.0-incubating |
-| `authorization.ranger.password`     | The Apache Ranger admin web login user password (auth type=simple), or path of the keytab file(auth type=kerberos)                                   | (none)        | No       | 0.8.0-incubating |
-| `authorization.ranger.service.name` | The Apache Ranger service name.                                                                                                                      | (none)        | No       | 0.8.0-incubating |
-
-Once you have used the correct configuration, you can perform authorization operations by calling Gravitino [authorization RESTful API](https://gravitino.apache.org/docs/latest/api/rest/grant-roles-to-a-user).
-
-#### Example of using the Authorization Ranger Iceberg Plugin
-
-Suppose you have an Apache Hive service in your datacenter and have created a `icebergRepo` in Apache Ranger to manage its permissions.
-The Ranger service is accessible at `172.0.0.100:6080`, with the username `Jack` and the password `PWD123`.
-To add this Hive service to Gravitino using the Hive catalog, you'll need to configure the following parameters.
-
-```properties
-authorization-provider=ranger
-authorization.ranger.admin.url=172.0.0.100:6080
-authorization.ranger.auth.type=simple
-authorization.ranger.username=Jack
-authorization.ranger.password=PWD123
-authorization.ranger.service.name=icebergRepo
-```
-
 :::caution
-Gravitino 0.8.0 only supports the authorization Apache Ranger Hive service and Apache Iceberg service. More data source authorization is under development.
+Gravitino 0.8.0 only supports authorization for the Apache Ranger Hive, Apache Iceberg, and Apache Paimon services.
+Spark can use the Kyuubi authorization plugin to access Gravitino's catalogs, but the plugin doesn't support updating or deleting data in Paimon catalogs.
+Authorization for more data sources is under development.
 :::
\ No newline at end of file
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 3241a4837..6629c8897 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -58,6 +58,7 @@ spark35 = "3.5.1"
 kyuubi4spark33 = "1.7.4"
 kyuubi4spark34 = "1.8.2"
 kyuubi4spark35 = "1.9.0"
+kyuubi4paimon = "1.10.0"
 trino = '435'
 scala-collection-compat = "2.7.0"
 scala-java-compat = "1.0.2"
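
For anyone wiring this up outside the IT harness: the Spark-side configuration exercised by the new RangerPaimonE2EIT boils down to the sketch below. The metastore URI and warehouse path are hypothetical placeholders; the catalog name, extension classes, and config keys come straight from the test above.

```java
import org.apache.spark.sql.SparkSession;

// Sketch: a Spark session with the Paimon catalog plus the Kyuubi Ranger
// extension, mirroring RangerPaimonE2EIT. Endpoints below are placeholders.
public class PaimonRangerSparkExample {
  public static void main(String[] args) {
    SparkSession spark =
        SparkSession.builder()
            .master("local[1]")
            .appName("Paimon with Ranger authorization")
            .config("spark.sql.catalog.paimon", "org.apache.paimon.spark.SparkCatalog")
            .config("spark.sql.catalog.paimon.metastore", "hive")
            .config("spark.sql.catalog.paimon.uri", "thrift://hive-metastore:9083")
            .config(
                "spark.sql.catalog.paimon.warehouse",
                "hdfs://namenode:9000/user/hive/warehouse")
            .config("spark.sql.catalog.paimon.cache-enabled", "false")
            .config(
                "spark.sql.extensions",
                "org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension,"
                    + "org.apache.paimon.spark.extensions.PaimonSparkSessionExtensions")
            .enableHiveSupport()
            .getOrCreate();

    // Whether these succeed for the current user is decided by Ranger policies.
    spark.sql("USE paimon");
    spark.sql("SHOW DATABASES").show();
  }
}
```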

