If the importer has allow_peer2peer set to true, then we can expect that
it would be able to handle VRAM addresses. Therefore, in this specific
case and only while running in VF mode, do not migrate the BO to System
RAM before exporting it.

Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/gpu/drm/xe/xe_dma_buf.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 68f309f5e981..a90c9368d265 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -17,6 +17,7 @@
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_pm.h"
+#include "xe_sriov.h"
 #include "xe_ttm_vram_mgr.h"
 #include "xe_vm.h"
 
@@ -26,8 +27,11 @@ static int xe_dma_buf_attach(struct dma_buf *dmabuf,
                             struct dma_buf_attachment *attach)
 {
        struct drm_gem_object *obj = attach->dmabuf->priv;
+       struct xe_bo *bo = gem_to_xe_bo(obj);
+       struct xe_device *xe = xe_bo_device(bo);
 
        if (attach->peer2peer &&
+           !IS_SRIOV_VF(xe) &&
            pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
                attach->peer2peer = false;
 
@@ -51,7 +55,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct xe_bo *bo = gem_to_xe_bo(obj);
        struct xe_device *xe = xe_bo_device(bo);
-       int ret;
+       int ret = 0;
 
        /*
         * For now only support pinning in TT memory, for two reasons:
@@ -63,7 +67,8 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
                return -EINVAL;
        }
 
-       ret = xe_bo_migrate(bo, XE_PL_TT);
+       if (!IS_SRIOV_VF(xe) || !attach->peer2peer)
+               ret = xe_bo_migrate(bo, XE_PL_TT);
        if (ret) {
                if (ret != -EINTR && ret != -ERESTARTSYS)
                        drm_dbg(&xe->drm,
-- 
2.45.1

Reply via email to