Add support in the OP-TEE backend driver for protected memory
allocation. The support is limited to only the SMC ABI and for secure
video buffers.

OP-TEE is probed for the range of protected physical memory and a
memory pool allocator is initialized if OP-TEE have support for such
memory.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Reviewed-by: Sumit Garg <sumit.garg@oss.qualcomm.com>
---
 drivers/tee/optee/Kconfig         |  5 +++
 drivers/tee/optee/core.c          |  7 ++++
 drivers/tee/optee/optee_private.h |  2 +
 drivers/tee/optee/smc_abi.c       | 69 ++++++++++++++++++++++++++++++-
 4 files changed, 81 insertions(+), 2 deletions(-)

diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 7bb7990d0b07..50d2051f7f20 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -25,3 +25,8 @@ config OPTEE_INSECURE_LOAD_IMAGE
 
          Additional documentation on kernel security risks are at
          Documentation/tee/op-tee.rst.
+
+config OPTEE_STATIC_PROTMEM_POOL
+       bool
+       depends on HAS_IOMEM && TEE_DMABUF_HEAPS
+       default y
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index c75fddc83576..49ccfe6f6583 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -56,6 +56,13 @@ int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
        return 0;
 }
 
+int optee_set_dma_mask(struct optee *optee, u_int pa_width)
+{
+       u64 mask = DMA_BIT_MASK(min(64, pa_width));
+
+       return dma_coerce_mask_and_coherent(&optee->teedev->dev, mask);
+}
+
 static void optee_bus_scan(struct work_struct *work)
 {
        WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 9526087f0e68..4969b83a9851 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -274,6 +274,8 @@ struct optee_call_ctx {
 
 extern struct blocking_notifier_head optee_rpmb_intf_added;
 
+int optee_set_dma_mask(struct optee *optee, u_int pa_width);
+
 int optee_notif_init(struct optee *optee, u_int max_key);
 void optee_notif_uninit(struct optee *optee);
 int optee_notif_wait(struct optee *optee, u_int key, u32 timeout);
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 26f8f7bbbe56..b4c007ed3b94 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1583,6 +1583,68 @@ static inline int optee_load_fw(struct platform_device *pdev,
 }
 #endif
 
+static struct tee_protmem_pool *static_protmem_pool_init(struct optee *optee)
+{
+#if IS_ENABLED(CONFIG_OPTEE_STATIC_PROTMEM_POOL)
+       union {
+               struct arm_smccc_res smccc;
+               struct optee_smc_get_protmem_config_result result;
+       } res;
+       struct tee_protmem_pool *pool;
+       void *p;
+       int rc;
+
+       optee->smc.invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0,
+                            0, 0, 0, &res.smccc);
+       if (res.result.status != OPTEE_SMC_RETURN_OK)
+               return ERR_PTR(-EINVAL);
+
+       rc = optee_set_dma_mask(optee, res.result.pa_width);
+       if (rc)
+               return ERR_PTR(rc);
+
+       /*
+        * Map the memory as uncached to make sure the kernel can work with
+        * __pfn_to_page() and friends since that's needed when passing the
+        * protected DMA-buf to a device. The memory should otherwise not
+        * be touched by the kernel since it's likely to cause an external
+        * abort due to the protection status.
+        */
+       p = devm_memremap(&optee->teedev->dev, res.result.start,
+                         res.result.size, MEMREMAP_WC);
+       if (IS_ERR(p))
+               return p;
+
+       pool = tee_protmem_static_pool_alloc(res.result.start, res.result.size);
+       if (IS_ERR(pool))
+               devm_memunmap(&optee->teedev->dev, p);
+
+       return pool;
+#else
+       return ERR_PTR(-EINVAL);
+#endif
+}
+
+static int optee_protmem_pool_init(struct optee *optee)
+{
+       enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+       struct tee_protmem_pool *pool = ERR_PTR(-EINVAL);
+       int rc;
+
+       if (!(optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_PROTMEM))
+               return 0;
+
+       pool = static_protmem_pool_init(optee);
+       if (IS_ERR(pool))
+               return PTR_ERR(pool);
+
+       rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool);
+       if (rc)
+               pool->ops->destroy_pool(pool);
+
+       return rc;
+}
+
 static int optee_probe(struct platform_device *pdev)
 {
        optee_invoke_fn *invoke_fn;
@@ -1678,7 +1740,7 @@ static int optee_probe(struct platform_device *pdev)
        optee = kzalloc(sizeof(*optee), GFP_KERNEL);
        if (!optee) {
                rc = -ENOMEM;
-               goto err_free_pool;
+               goto err_free_shm_pool;
        }
 
        optee->ops = &optee_ops;
@@ -1751,6 +1813,9 @@ static int optee_probe(struct platform_device *pdev)
                pr_info("Asynchronous notifications enabled\n");
        }
 
+       if (optee_protmem_pool_init(optee))
+               pr_info("Protected memory service not available\n");
+
        /*
         * Ensure that there are no pre-existing shm objects before enabling
         * the shm cache so that there's no chance of receiving an invalid
@@ -1802,7 +1867,7 @@ static int optee_probe(struct platform_device *pdev)
        tee_device_unregister(optee->teedev);
 err_free_optee:
        kfree(optee);
-err_free_pool:
+err_free_shm_pool:
        tee_shm_pool_free(pool);
        if (memremaped_shm)
                memunmap(memremaped_shm);
-- 
2.43.0

Reply via email to