[PATCH 02/13] drm/amdgpu: add shadow bo support

2016-08-02 Thread Chunming Zhou
A shadow BO is a copy of a BO that always lives in GTT and can be used
to back up the original BO.
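
For illustration (not part of the patch): a caller asks for the GTT shadow by
OR-ing the new flag into the create flags. This is a minimal sketch modelled
on the amdgpu_bo_create() call used for the page tables later in this series;
pt_bo and resv are placeholder names.

	struct amdgpu_bo *pt_bo;
	int r;

	/* one call allocates the VRAM bo and its GTT shadow (pt_bo->shadow) */
	r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
			     AMDGPU_GPU_PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			     AMDGPU_GEM_CREATE_SHADOW,
			     NULL, resv, &pt_bo);
	if (r)
		return r;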

Change-Id: Ia27d4225c47ff41d3053eb691276e29fb2d64026
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 36 +++---
 include/uapi/drm/amdgpu_drm.h  |  2 ++
 3 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 183781d..d415805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -506,6 +506,7 @@ struct amdgpu_bo {
struct amdgpu_device*adev;
struct drm_gem_object   gem_base;
struct amdgpu_bo*parent;
+   struct amdgpu_bo*shadow;
 
struct ttm_bo_kmap_obj  dma_buf_vmap;
struct amdgpu_mn*mn;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d8e69a7..e6ecf16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -389,6 +389,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 {
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+   int r;
 
memset(&placements, 0,
   (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -396,9 +397,32 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
amdgpu_ttm_placement_init(adev, &placement,
  placements, domain, flags);
 
-   return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
-  domain, flags, sg, &placement,
-  resv, bo_ptr);
+   r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+   domain, flags, sg, &placement,
+   resv, bo_ptr);
+   if (r)
+   return r;
+
+   if (flags & AMDGPU_GEM_CREATE_SHADOW) {
+   memset(&placements, 0,
+  (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+   amdgpu_ttm_placement_init(adev, &placement,
+ placements, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+   r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+   AMDGPU_GEM_DOMAIN_GTT,
+   AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+   NULL, &placement,
+   (*bo_ptr)->tbo.resv,
+   &(*bo_ptr)->shadow);
+   if (r)
+   amdgpu_bo_unref(bo_ptr);
+   } else
+   (*bo_ptr)->shadow = NULL;
+
+   return r;
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -455,6 +479,12 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
 
if ((*bo) == NULL)
return;
+   if ((*bo)->shadow) {
+   tbo = &((*bo)->shadow->tbo);
+   ttm_bo_unref(&tbo);
+   if (tbo == NULL)
+   (*bo)->shadow = NULL;
+   }
 
tbo = &((*bo)->tbo);
ttm_bo_unref(&tbo);
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 8df3816..da2d3e1 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -77,6 +77,8 @@
 #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
 /* Flag that the memory should be in VRAM and cleared */
 #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
+/* Flag that a shadow bo (in GTT) is created when allocating the VRAM bo */
+#define AMDGPU_GEM_CREATE_SHADOW   (1 << 4)
 
 struct drm_amdgpu_gem_create_in  {
/** the requested memory size */
-- 
1.9.1



[PATCH 03/13] drm/amdgpu: set shadow flag for pd/pt bo

2016-08-02 Thread Chunming Zhou
The PD/PT shadow BOs are used to back up the page tables; when a GPU reset
happens, the page tables can be restored from them.

Change-Id: I31eeb581f203d1db0654a48745ef4e64ed40ed9b
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 +---
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d415805..ae4608c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -875,6 +875,8 @@ struct amdgpu_ring {
 struct amdgpu_vm_pt {
struct amdgpu_bo_list_entry entry;
uint64_taddr;
+   struct amdgpu_bo_list_entry entry_shadow;
+   uint64_taddr_shadow;
 };
 
 struct amdgpu_vm {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 38c80ea..aedd1cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1331,9 +1331,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
-   struct amdgpu_bo_list_entry *entry;
+   struct amdgpu_bo_list_entry *entry, *entry_shadow;
struct amdgpu_bo *pt;
 
+   entry_shadow = &vm->page_tables[pt_idx].entry_shadow;
entry = &vm->page_tables[pt_idx].entry;
if (entry->robj)
continue;
@@ -1341,7 +1342,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 AMDGPU_GPU_PAGE_SIZE, true,
 AMDGPU_GEM_DOMAIN_VRAM,
-AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+AMDGPU_GEM_CREATE_SHADOW,
 NULL, resv, &pt);
if (r)
goto error_free;
@@ -1363,6 +1365,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
entry->tv.shared = true;
entry->user_pages = NULL;
vm->page_tables[pt_idx].addr = 0;
+
+   entry_shadow->robj = pt->shadow;
+   entry_shadow->priority = 0;
+   entry_shadow->tv.bo = &entry_shadow->robj->tbo;
+   entry_shadow->tv.shared = true;
+   entry_shadow->user_pages = NULL;
+   vm->page_tables[pt_idx].addr_shadow = 0;
}
 
return 0;
@@ -1540,7 +1549,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
 
r = amdgpu_bo_create(adev, pd_size, align, true,
 AMDGPU_GEM_DOMAIN_VRAM,
-AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+AMDGPU_GEM_CREATE_SHADOW,
 NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
-- 
1.9.1



[PATCH 04/13] drm/amdgpu: update shadow pt bo while update pt

2016-08-02 Thread Chunming Zhou
Change-Id: I8245cdad490d2a0b8cf4b9320e53e14db0b6add4
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index aedd1cb..e7a400d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -651,6 +651,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device 
*adev,
if (vm->page_tables[pt_idx].addr == pt)
continue;
vm->page_tables[pt_idx].addr = pt;
+   vm->page_tables[pt_idx].addr_shadow = pt;
 
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -801,7 +802,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device 
*adev,
*vm_update_params,
  struct amdgpu_vm *vm,
  uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+ uint64_t dst, uint32_t flags, bool shadow)
 {
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
@@ -815,7 +816,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device 
*adev,
/* initialize the variables */
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
-   pt = vm->page_tables[pt_idx].entry.robj;
+   pt = shadow ? vm->page_tables[pt_idx].entry_shadow.robj :
+   vm->page_tables[pt_idx].entry.robj;
 
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -834,7 +836,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device 
*adev,
/* walk over the address space and update the page tables */
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
-   pt = vm->page_tables[pt_idx].entry.robj;
+   pt = shadow ? vm->page_tables[pt_idx].entry_shadow.robj :
+   vm->page_tables[pt_idx].entry.robj;
 
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -941,6 +944,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device 
*adev,
/* two extra commands for begin/end of fragment */
ndw += 2 * 10;
}
+   /* double ndw, since the shadow pt bo needs to be updated as well */
+   ndw *= 2;
 
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
@@ -960,9 +965,12 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,
r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
if (r)
goto error_free;
+   /* update shadow pt bo */
+   amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
+ last + 1, addr, flags, true);
 
amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
- last + 1, addr, flags);
+ last + 1, addr, flags, false);
 
amdgpu_ring_pad_ib(ring, vm_update_params.ib);
WARN_ON(vm_update_params.ib->length_dw > ndw);
-- 
1.9.1



[PATCH 01/13] drm/amdgpu: irq resume should be immediately after gpu resume

2016-08-02 Thread Chunming Zhou
Change-Id: Icf64bf5964f0ef66c239ab0679d51275cc272699
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index edbb370..7fd651a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2158,6 +2158,7 @@ retry:
amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
+   amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
@@ -2192,7 +2193,6 @@ retry:
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
}
-   amdgpu_irq_gpu_reset_resume_helper(adev);
 
return r;
 }
-- 
1.9.1



[PATCH 00/13] shadow page table support

2016-08-02 Thread Chunming Zhou
Since we cannot guarantee that VRAM content survives a GPU reset, backing up
the page tables is necessary; a shadow page table is a sensible way to recover
the page tables when a GPU reset happens.
We allocate a GTT bo as the shadow of each VRAM bo when creating a page table
and keep the two in sync. After a GPU reset, SDMA is used to copy the GTT bo
content back to the VRAM bo, which restores the page tables.
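
A rough sketch of that restore step, modelled on the amdgpu_bo_pin() and
amdgpu_copy_buffer() usage in patch 06 below (illustrative only; error handling
is abbreviated and the reservation object passed to the copy is an assumption):

	uint64_t vram_addr, gtt_addr;
	struct fence *fence;
	int r;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
	if (r)
		return r;
	r = amdgpu_bo_pin(bo->shadow, AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
	if (r)
		goto unpin_vram;

	/* SDMA copy: GTT shadow -> VRAM bo, then fence the destination bo */
	r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
			       amdgpu_bo_size(bo), bo->tbo.resv, &fence);
	if (!r)
		amdgpu_bo_fence(bo, fence, true);

	amdgpu_bo_unpin(bo->shadow);
unpin_vram:
	amdgpu_bo_unpin(bo);
	return r;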

Chunming Zhou (13):
  drm/amdgpu: irq resume should be immediately after gpu resume
  drm/amdgpu: add shadow bo support
  drm/amdgpu: set shadow flag for pd/pt bo
  drm/amdgpu: update shadow pt bo while update pt
  drm/amdgpu: update pd shadow while updating pd
  drm/amdgpu: implement amdgpu_vm_recover_page_table_from_shadow
  drm/amdgpu: link all vm clients
  drm/amdgpu: add vm_list_lock
  drm/amd: add block entity function
  drm/amdgpu: recover page tables after gpu reset
  drm/amd: wait necessary dependency before running job
  drm/amdgpu: add vm recover pt fence
  drm/amdgpu: add backup condition for shadow page table

 drivers/gpu/drm/amd/amdgpu/amdgpu.h   |  15 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c|   6 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c|  33 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |   5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c|  36 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c| 219 ++
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |  30 +++-
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |   3 +
 include/uapi/drm/amdgpu_drm.h |   2 +
 9 files changed, 316 insertions(+), 33 deletions(-)

-- 
1.9.1



[PATCH 11/13] drm/amd: wait necessary dependency before running job

2016-08-02 Thread Chunming Zhou
Change-Id: Ibcc3558c2330caad1a2edb9902b3f21bd950d19f
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 71b5f1a..a15fd88 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -436,9 +436,13 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler 
*sched)
 
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct amd_sched_fence *s_fence = s_job->s_fence;
-   struct fence *fence;
+   struct fence *fence, *dependency;
 
spin_unlock(&sched->job_list_lock);
+   while ((dependency = sched->ops->dependency(s_job))) {
+  fence_wait(dependency, false);
+  fence_put(dependency);
+   }
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
if (fence) {
-- 
1.9.1



[PATCH 09/13] drm/amd: add block entity function

2016-08-02 Thread Chunming Zhou
Change-Id: Ia0378640962eef362569e0bbe090aea1ca083a55
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24 
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  3 +++
 2 files changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index f96aa82..71b5f1a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -110,6 +110,26 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 }
 
 /**
+ * Block all entities of this run queue
+ *
+ * @rq The run queue to block.
+ *
+ */
+int amd_sched_rq_block_entity(struct amd_sched_rq *rq, bool block)
+{
+   struct amd_sched_entity *entity;
+
+   spin_lock(&rq->lock);
+
+   list_for_each_entry(entity, &rq->entities, list)
+   entity->block = block;
+
+   spin_unlock(&rq->lock);
+
+   return 0;
+}
+
+/**
  * Init a context entity used by scheduler when submit to HW ring.
  *
  * @sched  The pointer to the scheduler
@@ -134,6 +154,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
INIT_LIST_HEAD(&entity->list);
entity->rq = rq;
entity->sched = sched;
+   entity->block = false;
 
spin_lock_init(&entity->queue_lock);
r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
@@ -186,6 +207,9 @@ static bool amd_sched_entity_is_idle(struct 
amd_sched_entity *entity)
  */
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
 {
+   if (entity->block)
+   return false;
+
if (kfifo_is_empty(&entity->job_queue))
return false;
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb..a1c0073 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -52,6 +52,8 @@ struct amd_sched_entity {
 
struct fence*dependency;
struct fence_cb cb;
+
+   boolblock;
 };
 
 /**
@@ -155,4 +157,5 @@ int amd_sched_job_init(struct amd_sched_job *job,
   void *owner);
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
 void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
+int amd_sched_rq_block_entity(struct amd_sched_rq *rq, bool block);
 #endif
-- 
1.9.1



[PATCH 06/13] drm/amdgpu: implement amdgpu_vm_recover_page_table_from_shadow

2016-08-02 Thread Chunming Zhou
Change-Id: I9957e726576289448911f5fb2ff7bcb9311a1906
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 77 ++
 2 files changed, 79 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4cdfdff..1234b8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1006,6 +1006,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
   uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
  struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
+struct amdgpu_vm *vm);
 
 /*
  * context related structures
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fb8a7ab..4f95dc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,6 +700,83 @@ error_free:
return r;
 }
 
+static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
+   struct amdgpu_bo *bo,
+   struct amdgpu_bo *bo_shadow,
+   struct reservation_object *resv)
+
+{
+   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+   struct fence *fence;
+   int r;
+   uint64_t vram_addr, gtt_addr;
+
+   r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+   if (r) {
+   DRM_ERROR("Failed to pin bo object\n");
+   goto err1;
+   }
+   r = amdgpu_bo_pin(bo_shadow, AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+   if (r) {
+   DRM_ERROR("Failed to pin bo shadow object\n");
+   goto err2;
+   }
+
+   r = reservation_object_reserve_shared(bo->tbo.resv);
+   if (r)
+   goto err3;
+
+   r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+  amdgpu_bo_size(bo), resv, &fence);
+   if (!r)
+   amdgpu_bo_fence(bo, fence, true);
+
+err3:
+   amdgpu_bo_unpin(bo_shadow);
+err2:
+   amdgpu_bo_unpin(bo);
+err1:
+
+   return r;
+}
+
+int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
+struct amdgpu_vm *vm)
+{
+   uint64_t pt_idx;
+   int r;
+
+   /* bo and shadow use the same resv, so reserve only once */
+   r = amdgpu_bo_reserve(vm->page_directory, false);
+   if (unlikely(r != 0))
+   return r;
+
+   r = amdgpu_vm_recover_bo_from_shadow(adev, vm->page_directory,
+vm->page_directory->shadow,
+NULL);
+   if (r) {
+   DRM_ERROR("recover page table failed!\n");
+   goto err;
+   }
+
+   for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+   struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+   struct amdgpu_bo *bo_shadow = 
vm->page_tables[pt_idx].entry_shadow.robj;
+
+   if (!bo || !bo_shadow)
+   continue;
+   r = amdgpu_vm_recover_bo_from_shadow(adev, bo, bo_shadow,
+NULL);
+   if (r) {
+   DRM_ERROR("recover page table failed!\n");
+   goto err;
+   }
+   }
+
+err:
+   amdgpu_bo_unreserve(vm->page_directory);
+   return r;
+}
 /**
  * amdgpu_vm_update_pdes - make sure that page directory is valid
  *
-- 
1.9.1



[PATCH 05/13] drm/amdgpu: update pd shadow while updating pd

2016-08-02 Thread Chunming Zhou
Change-Id: Icafa90a6625ea7b5ab3e360ba0d73544cda251b0
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 68 +++---
 2 files changed, 48 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ae4608c..4cdfdff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -897,6 +897,7 @@ struct amdgpu_vm {
 
/* contains the page directory */
struct amdgpu_bo*page_directory;
+   struct amdgpu_bo_list_entry pd_entry_shadow;
unsignedmax_pde_used;
struct fence*page_directory_fence;
uint64_tlast_eviction_counter;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e7a400d..fb8a7ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -138,13 +138,15 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, 
struct amdgpu_vm *vm,
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+   struct amdgpu_bo_list_entry *entry_shadow = 
&vm->page_tables[i].entry_shadow;
 
-   if (!entry->robj)
+   if (!entry->robj || !entry_shadow->robj)
continue;
 
list_add(&entry->tv.head, duplicates);
+   list_add(&entry_shadow->tv.head, duplicates);
}
-
+   list_add(&vm->pd_entry_shadow.tv.head, duplicates);
 }
 
 /**
@@ -597,23 +599,13 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, 
uint64_t addr)
return result;
 }
 
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-   struct amdgpu_vm *vm)
+
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+struct amdgpu_vm *vm, bool shadow)
 {
struct amdgpu_ring *ring;
-   struct amdgpu_bo *pd = vm->page_directory;
+   struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+   vm->page_directory;
uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
@@ -648,10 +640,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device 
*adev,
continue;
 
pt = amdgpu_bo_gpu_offset(bo);
-   if (vm->page_tables[pt_idx].addr == pt)
-   continue;
-   vm->page_tables[pt_idx].addr = pt;
-   vm->page_tables[pt_idx].addr_shadow = pt;
+   if (!shadow) {
+   if (vm->page_tables[pt_idx].addr == pt)
+   continue;
+   vm->page_tables[pt_idx].addr = pt;
+   } else {
+   if (vm->page_tables[pt_idx].addr_shadow == pt)
+   continue;
+   vm->page_tables[pt_idx].addr_shadow = pt;
+   }
 
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -704,6 +701,29 @@ error_free:
 }
 
 /**
+ * amdgpu_vm_update_pdes - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+   struct amdgpu_vm *vm)
+{
+   int r;
+
+   r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+   if (r)
+   return r;
+   return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+}
+
+/**
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
  * @adev: amdgpu_device pointer
@@ -1573,6 +1593,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
goto error_free_page_directory;
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
 
+   vm->pd_entry_shadow.robj = vm->page_directory->shadow;
+   vm->pd_entry_shadow.priority = 0;
+   vm->pd_entry_shadow.tv.bo = &vm->page_directory->shadow->tbo;
+   vm->pd_entry_shadow.tv.shared = true;
+   vm->pd_entry_shadow.user_pages = NULL;
+
return 0;
 
 error_free_p

[PATCH 10/13] drm/amdgpu: recover page tables after gpu reset

2016-08-02 Thread Chunming Zhou
Change-Id: I963598ba6eb44bc8620d70e026c0175d1a1de120
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 28 +++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2412cc0..5e984cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2161,18 +2161,44 @@ retry:
amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
+   struct amdgpu_ring *buffer_ring = adev->mman.buffer_funcs_ring;
+
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
r = amdgpu_suspend(adev);
+   need_full_reset = true;
goto retry;
}
-
+   /**
+* recover vm page tables, since we cannot assume VRAM is intact
+* after a full gpu reset.
+*/
+   if (need_full_reset && !(adev->flags & AMD_IS_APU)) {
+   struct amdgpu_vm *vm, *tmp;
+
+   DRM_INFO("recover page table from shadow\n");
+   amd_sched_rq_block_entity(
+   
&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
+   kthread_unpark(buffer_ring->sched.thread);
+   spin_lock(&adev->vm_list_lock);
+   list_for_each_entry_safe(vm, tmp, &adev->vm_list, list) 
{
+   spin_unlock(&adev->vm_list_lock);
+   amdgpu_vm_recover_page_table_from_shadow(adev, 
vm);
+   spin_lock(&adev->vm_list_lock);
+   }
+   spin_unlock(&adev->vm_list_lock);
+   amd_sched_rq_block_entity(
+   
&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+   }
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
+
+   DRM_INFO("ring:%d recover jobs\n", ring->idx);
+   kthread_park(buffer_ring->sched.thread);
amd_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
}
-- 
1.9.1



[PATCH 07/13] drm/amdgpu: link all vm clients

2016-08-02 Thread Chunming Zhou
Add the VM client to the list tail when it is created, and move it to the
list head when it submits to the scheduler.
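
In effect adev->vm_list becomes a most-recently-submitted-first list. A minimal
sketch of the touch points (locking is added in the next patch, and the
reset-time walk shown last is added by a later patch in this series):

	struct amdgpu_vm *vm, *tmp;

	/* at vm creation: append the new client to the tail */
	list_add_tail(&vm->list, &adev->vm_list);

	/* at command submission: bump the client to the head */
	list_move(&vm->list, &adev->vm_list);

	/* at gpu reset: walk the clients, most recently active first */
	list_for_each_entry_safe(vm, tmp, &adev->vm_list, list)
		amdgpu_vm_recover_page_table_from_shadow(adev, vm);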

Change-Id: I0625092f918853303a5ee97ea2eac87fb790ed69
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h| 6 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +++
 4 files changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1234b8e..61fd4e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -895,6 +895,9 @@ struct amdgpu_vm {
/* BO mappings freed, but not yet updated in the PT */
struct list_headfreed;
 
+   /* vm itself list */
+   struct list_headlist;
+
/* contains the page directory */
struct amdgpu_bo*page_directory;
struct amdgpu_bo_list_entry pd_entry_shadow;
@@ -2167,6 +2170,9 @@ struct amdgpu_device {
struct kfd_dev  *kfd;
 
struct amdgpu_virtualization virtualization;
+
+   /* link all vm clients */
+   struct list_headvm_list;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 45d5227..d19838b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -835,7 +835,10 @@ static int amdgpu_cs_dependencies(struct amdgpu_device 
*adev,
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
 {
+   struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+   struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_ring *ring = p->job->ring;
+   struct amdgpu_device *adev = ring->adev;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
int r;
@@ -858,6 +861,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+   list_move(&vm->list, &adev->vm_list);
 
return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7fd651a..552216a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1559,6 +1559,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
 
+   INIT_LIST_HEAD(&adev->vm_list);
+
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
adev->rmmio_size = pci_resource_len(adev->pdev, 5);
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4f95dc4..35d939b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1628,6 +1628,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
+   INIT_LIST_HEAD(&vm->list);
 
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
@@ -1675,6 +1676,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
vm->pd_entry_shadow.tv.bo = &vm->page_directory->shadow->tbo;
vm->pd_entry_shadow.tv.shared = true;
vm->pd_entry_shadow.user_pages = NULL;
+   list_add_tail(&vm->list, &adev->vm_list);
 
return 0;
 
@@ -1702,6 +1704,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
int i;
 
+   list_del(&vm->list);
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
 
if (!RB_EMPTY_ROOT(&vm->va)) {
-- 
1.9.1



[PATCH 08/13] drm/amdgpu: add vm_list_lock

2016-08-02 Thread Chunming Zhou
Add a spinlock to protect adev->vm_list.

Change-Id: I74d309eca9c22d190dd4072c69d26fa7fdea8884
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h| 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 
 4 files changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 61fd4e2..a2261ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2173,6 +2173,7 @@ struct amdgpu_device {
 
/* link all vm clients */
struct list_headvm_list;
+   spinlock_t  vm_list_lock;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d19838b..29c10f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -861,7 +861,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+   spin_lock(&adev->vm_list_lock);
list_move(&vm->list, &adev->vm_list);
+   spin_unlock(&adev->vm_list_lock);
 
return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 552216a..2412cc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1560,6 +1560,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->audio_endpt_idx_lock);
 
INIT_LIST_HEAD(&adev->vm_list);
+   spin_lock_init(&adev->vm_list_lock);
 
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
adev->rmmio_size = pci_resource_len(adev->pdev, 5);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 35d939b..1cb2e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1676,7 +1676,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
vm->pd_entry_shadow.tv.bo = &vm->page_directory->shadow->tbo;
vm->pd_entry_shadow.tv.shared = true;
vm->pd_entry_shadow.user_pages = NULL;
+   spin_lock(&adev->vm_list_lock);
list_add_tail(&vm->list, &adev->vm_list);
+   spin_unlock(&adev->vm_list_lock);
 
return 0;
 
@@ -1704,7 +1706,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
int i;
 
+   spin_lock(&adev->vm_list_lock);
list_del(&vm->list);
+   spin_unlock(&adev->vm_list_lock);
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
 
if (!RB_EMPTY_ROOT(&vm->va)) {
-- 
1.9.1



[PATCH 12/13] drm/amdgpu: add vm recover pt fence

2016-08-02 Thread Chunming Zhou
Before every job runs, we must make sure that its VM has been completely recovered.

Change-Id: Ibe77a3c8f8206def280543fbb4195ad2ab9772e0
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |  5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 24 ++--
 3 files changed, 25 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a2261ac..c0e4164 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -919,6 +919,8 @@ struct amdgpu_vm {
 
/* client id */
u64 client_id;
+
+   struct fence*recover_pt_fence;
 };
 
 struct amdgpu_vm_id {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 6674d40..8d87a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -152,6 +152,10 @@ static struct fence *amdgpu_job_dependency(struct 
amd_sched_job *sched_job)
fence = amdgpu_sync_get_fence(&job->sync);
}
 
+   if (fence == NULL && vm && vm->recover_pt_fence &&
+   !fence_is_signaled(vm->recover_pt_fence))
+   fence = fence_get(vm->recover_pt_fence);
+
return fence;
 }
 
@@ -170,6 +174,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job 
*sched_job)
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
trace_amdgpu_sched_run_job(job);
+
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
   job->sync.last_vm_update, job, &fence);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1cb2e71..1305dc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -703,11 +703,11 @@ error_free:
 static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
struct amdgpu_bo *bo_shadow,
-   struct reservation_object *resv)
+   struct reservation_object *resv,
+   struct fence **fence)
 
 {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-   struct fence *fence;
int r;
uint64_t vram_addr, gtt_addr;
 
@@ -727,9 +727,9 @@ static int amdgpu_vm_recover_bo_from_shadow(struct 
amdgpu_device *adev,
goto err3;
 
r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-  amdgpu_bo_size(bo), resv, &fence);
+  amdgpu_bo_size(bo), resv, fence);
if (!r)
-   amdgpu_bo_fence(bo, fence, true);
+   amdgpu_bo_fence(bo, *fence, true);
 
 err3:
amdgpu_bo_unpin(bo_shadow);
@@ -743,6 +743,7 @@ err1:
 int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
 struct amdgpu_vm *vm)
 {
+   struct fence *fence;
uint64_t pt_idx;
int r;
 
@@ -753,11 +754,14 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
 
r = amdgpu_vm_recover_bo_from_shadow(adev, vm->page_directory,
 vm->page_directory->shadow,
-NULL);
+NULL, &fence);
if (r) {
DRM_ERROR("recover page table failed!\n");
goto err;
}
+   fence_put(vm->recover_pt_fence);
+   vm->recover_pt_fence = fence_get(fence);
+   fence_put(fence);
 
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
@@ -766,15 +770,21 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
if (!bo || !bo_shadow)
continue;
r = amdgpu_vm_recover_bo_from_shadow(adev, bo, bo_shadow,
-NULL);
+NULL, &fence);
if (r) {
DRM_ERROR("recover page table failed!\n");
goto err;
}
+   fence_put(vm->recover_pt_fence);
+   vm->recover_pt_fence = fence_get(fence);
+   fence_put(fence);
}
 
 err:
amdgpu_bo_unreserve(vm->page_directory);
+   if (vm->recover_pt_fence)
+   r = fence_wait(vm->recover_pt_fence, false);
+
return r;
 }
 /**
@@ -1629,6 +1639,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
   

[PATCH 13/13] drm/amdgpu: add backup condition for shadow page table

2016-08-02 Thread Chunming Zhou
Change-Id: I5a8c0f4c1e9b65d2310ccb0f669b478884072a11
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 71 +++---
 1 file changed, 48 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1305dc1..0e3f116 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -112,6 +112,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
list_add(&entry->tv.head, validated);
 }
 
+static bool amdgpu_vm_need_backup(struct amdgpu_device *adev)
+{
+   if (adev->flags & AMD_IS_APU)
+   return false;
+
+   return amdgpu_lockup_timeout > 0 ? true : false;
+}
+
 /**
  * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
  *
@@ -140,13 +148,18 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, 
struct amdgpu_vm *vm,
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
struct amdgpu_bo_list_entry *entry_shadow = 
&vm->page_tables[i].entry_shadow;
 
-   if (!entry->robj || !entry_shadow->robj)
+   if (!entry->robj)
+   continue;
+
+   if (amdgpu_vm_need_backup(adev) && !entry_shadow->robj)
continue;
 
list_add(&entry->tv.head, duplicates);
-   list_add(&entry_shadow->tv.head, duplicates);
+   if (amdgpu_vm_need_backup(adev))
+   list_add(&entry_shadow->tv.head, duplicates);
}
-   list_add(&vm->pd_entry_shadow.tv.head, duplicates);
+   if (amdgpu_vm_need_backup(adev))
+   list_add(&vm->pd_entry_shadow.tv.head, duplicates);
 }
 
 /**
@@ -747,6 +760,8 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
uint64_t pt_idx;
int r;
 
+   if (!amdgpu_vm_need_backup(adev))
+   return 0;
   /* bo and shadow use the same resv, so reserve only once */
r = amdgpu_bo_reserve(vm->page_directory, false);
if (unlikely(r != 0))
@@ -804,9 +819,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device 
*adev,
 {
int r;
 
-   r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
-   if (r)
-   return r;
+   if (amdgpu_vm_need_backup(adev)) {
+   r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+   if (r)
+   return r;
+   }
+
return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
 }
 
@@ -1072,10 +1090,11 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,
r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
if (r)
goto error_free;
-   /* update shadow pt bo */
-   amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
- last + 1, addr, flags, true);
-
+   if (amdgpu_vm_need_backup(adev)) {
+   /* update shadow pt bo */
+   amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
+ last + 1, addr, flags, true);
+   }
amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
  last + 1, addr, flags, false);
 
@@ -1458,7 +1477,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 AMDGPU_GPU_PAGE_SIZE, true,
 AMDGPU_GEM_DOMAIN_VRAM,
 AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-AMDGPU_GEM_CREATE_SHADOW,
+(amdgpu_vm_need_backup(adev) ?
+ AMDGPU_GEM_CREATE_SHADOW : 0),
 NULL, resv, &pt);
if (r)
goto error_free;
@@ -1481,12 +1501,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
entry->user_pages = NULL;
vm->page_tables[pt_idx].addr = 0;
 
-   entry_shadow->robj = pt->shadow;
-   entry_shadow->priority = 0;
-   entry_shadow->tv.bo = &entry_shadow->robj->tbo;
-   entry_shadow->tv.shared = true;
-   entry_shadow->user_pages = NULL;
-   vm->page_tables[pt_idx].addr_shadow = 0;
+   if (amdgpu_vm_need_backup(adev)) {
+   entry_shadow->robj = pt->shadow;
+   entry_shadow->priority = 0;
+   entry_shadow->tv.bo = &entry_shadow->robj->tbo;
+   entry_shadow->tv.shared = true;
+   entry_shadow->user_pages = NULL;
+   vm->page_tables[pt_idx].addr_shadow = 0;
+   }
}
 
return 0;
@@ -1667,7 +1689,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
   

[PATCH 03/11] drm/amd: add recover run queue for scheduler

2016-08-02 Thread Chunming Zhou
Change-Id: I7171d1e3884aabe1263d8f7be18cadf2e98216a4
Signed-off-by: Chunming Zhou 
Reviewed-by: Edward O'Callaghan 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index a1c0073..cd87bc7 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -112,7 +112,8 @@ struct amd_sched_backend_ops {
 };
 
 enum amd_sched_priority {
-   AMD_SCHED_PRIORITY_KERNEL = 0,
+   AMD_SCHED_PRIORITY_RECOVER = 0,
+   AMD_SCHED_PRIORITY_KERNEL,
AMD_SCHED_PRIORITY_NORMAL,
AMD_SCHED_MAX_PRIORITY
 };
-- 
1.9.1



[PATCH 04/11] drm/amdgpu: fix vm init error path

2016-08-02 Thread Chunming Zhou
Change-Id: Ie3d5440dc0d2d3a61d8e785ab08b8b91eda223db
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 11c1263..1d58577 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1682,7 +1682,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
r = amd_sched_entity_init(&ring->sched, &vm->entity,
  rq, amdgpu_sched_jobs);
if (r)
-   return r;
+   goto err;
 
vm->page_directory_fence = NULL;
 
@@ -1725,6 +1725,9 @@ error_free_page_directory:
 error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err:
+   drm_free_large(vm->page_tables);
+
return r;
 }
 
-- 
1.9.1



[PATCH 00/11] add recovery entity and run queue

2016-08-02 Thread Chunming Zhou
Every VM has its own recovery entity, which is used to recover its page tables
from their shadows. A VM does not need to wait for the preceding VMs to
complete, and using all PTE rings speeds up recovery.

Every scheduler also has its own recovery entity, which is used to save HW
jobs and resubmit them; this resolves the conflict between the reset thread
and the scheduler thread when running jobs.

The series also includes some fixes found while doing this improvement.
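
As an illustrative sketch only (patch 05/11 is truncated in this archive): a
per-VM recovery entity would be initialized on the recover run queue introduced
in patch 03/11, roughly as follows; vm->recover_entity and the choice of ring
are taken from the patch descriptions, not from the missing diff.

	struct amd_sched_rq *rq =
		&ring->sched.sched_rq[AMD_SCHED_PRIORITY_RECOVER];
	int r;

	/* give each vm its own entity on the recover run queue */
	r = amd_sched_entity_init(&ring->sched, &vm->recover_entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;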

Chunming Zhou (11):
  drm/amdgpu: hw ring should be empty when gpu reset
  drm/amdgpu: specify entity to amdgpu_copy_buffer
  drm/amd: add recover run queue for scheduler
  drm/amdgpu: fix vm init error path
  drm/amdgpu: add vm recover entity
  drm/amdgpu: use all pte rings to recover page table
  drm/amd: add recover entity for every scheduler
  drm/amd: use scheduler to recover hw jobs
  drm/amd: hw job list should be exact
  drm/amd: reset jobs to recover entity
  drm/amdgpu: no need fence wait every time

 drivers/gpu/drm/amd/amdgpu/amdgpu.h   |   5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c |   3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c|  35 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  |  11 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  |   8 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |   5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c|  27 --
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 129 +-
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |   4 +-
 9 files changed, 135 insertions(+), 92 deletions(-)

-- 
1.9.1



[PATCH 01/11] drm/amdgpu: hw ring should be empty when gpu reset

2016-08-02 Thread Chunming Zhou
Change-Id: I08ca5a805f590cc7aad0e9ccd91bd5925bb216e2
Signed-off-by: Chunming Zhou 
Reviewed-by: Edward O'Callaghan 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c   | 11 +++
 3 files changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c0e4164..a0dea0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1253,6 +1253,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib 
*ib);
+void amdgpu_ring_reset(struct amdgpu_ring *ring);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5e984cf..b470e5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2117,6 +2117,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
continue;
kthread_park(ring->sched.thread);
amd_sched_hw_job_reset(&ring->sched);
+   amdgpu_ring_reset(ring);
}
/* after all hw jobs are reset, hw fence is meaningless, so 
force_completion */
amdgpu_fence_driver_force_completion(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9989e25..75e1da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -110,6 +110,17 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, 
struct amdgpu_ib *ib)
ib->ptr[ib->length_dw++] = ring->nop;
 }
 
+void amdgpu_ring_reset(struct amdgpu_ring *ring)
+{
+   u32 rptr = amdgpu_ring_get_rptr(ring);
+
+   ring->wptr = rptr;
+   ring->wptr &= ring->ptr_mask;
+
+   mb();
+   amdgpu_ring_set_wptr(ring);
+}
+
 /**
  * amdgpu_ring_commit - tell the GPU to execute the new
  * commands on the ring buffer
-- 
1.9.1



[PATCH 02/11] drm/amdgpu: specify entity to amdgpu_copy_buffer

2016-08-02 Thread Chunming Zhou
Change-Id: Ib84621d8ab61bf2ca0719c6888cc403982127684
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h   | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  | 8 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   | 5 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c| 2 +-
 5 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a0dea0d..4889d13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -432,6 +432,7 @@ struct amdgpu_mman {
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+  struct amd_sched_entity *entity,
   uint64_t src_offset,
   uint64_t dst_offset,
   uint32_t byte_count,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 33e47a4..cab93c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device 
*adev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-   r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity,
+  saddr, daddr, size, NULL, &fence);
if (r)
goto exit_do_move;
r = fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 05a53f4..bbaa1c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -110,8 +110,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
amdgpu_bo_kunmap(gtt_obj[i]);
 
-   r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-  size, NULL, &fence);
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity, gtt_addr,
+  vram_addr, size, NULL, &fence);
 
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -155,8 +155,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
amdgpu_bo_kunmap(vram_obj);
 
-   r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-  size, NULL, &fence);
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity, vram_addr,
+  gtt_addr, size, NULL, &fence);
 
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5dc9c4c..55c50b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -283,7 +283,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
 
-   r = amdgpu_copy_buffer(ring, old_start, new_start,
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity, old_start, new_start,
   new_mem->num_pages * PAGE_SIZE, /* bytes */
   bo->resv, &fence);
if (r)
@@ -1147,6 +1147,7 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct 
*vma)
 }
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+  struct amd_sched_entity *entity,
   uint64_t src_offset,
   uint64_t dst_offset,
   uint32_t byte_count,
@@ -1195,7 +1196,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
-   r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+   r = amdgpu_job_submit(job, ring, entity,
  AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0e3f116..11c1263 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -739,7 +739,7 @@ static int amdgpu_vm_recover_bo_from_shadow(struct 
amdgpu_device *adev,
if (r)
goto err3;
 
-   r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity, gtt_addr, vram_addr,
   amdgpu_bo_size(bo), resv, fence);
if (!r)
amdgpu_bo_fence(bo, *fence, true);
-- 
1.9.1


[PATCH 08/11] drm/amd: use scheduler to recover hw jobs

2016-08-02 Thread Chunming Zhou
The old approach tried to recover HW jobs directly, which conflicts with the
scheduler thread.

Change-Id: I9e45abd43ae280a675b0b0d88a820106dea2716c
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 48 +--
 1 file changed, 16 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 36f5805..9f4fa6e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -324,10 +324,12 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  *
  * Returns true if we could submit the job.
  */
-static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
+static bool amd_sched_entity_in_or_recover(struct amd_sched_job *sched_job,
+  bool recover)
 {
struct amd_gpu_scheduler *sched = sched_job->sched;
-   struct amd_sched_entity *entity = sched_job->s_entity;
+   struct amd_sched_entity *entity = recover ? &sched->recover_entity :
+   sched_job->s_entity;
bool added, first = false;
 
spin_lock(&entity->queue_lock);
@@ -348,6 +350,15 @@ static bool amd_sched_entity_in(struct amd_sched_job 
*sched_job)
return added;
 }
 
+static void amd_sched_entity_push_job_recover(struct amd_sched_job *sched_job)
+{
+   struct amd_sched_entity *entity = sched_job->s_entity;
+
+   trace_amd_sched_job(sched_job);
+   wait_event(entity->sched->job_scheduled,
+  amd_sched_entity_in_or_recover(sched_job, true));
+}
+
 /* job_finish is called after hw fence signaled, and
  * the job had already been deleted from ring_mirror_list
  */
@@ -426,39 +437,12 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler 
*sched)
 void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 {
struct amd_sched_job *s_job, *tmp;
-   int r;
 
spin_lock(&sched->job_list_lock);
-   s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-struct amd_sched_job, node);
-   if (s_job)
-   schedule_delayed_work(&s_job->work_tdr, sched->timeout);
-
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
-   struct amd_sched_fence *s_fence = s_job->s_fence;
-   struct fence *fence, *dependency;
-
+   list_del_init(&s_job->node);
spin_unlock(&sched->job_list_lock);
-   while ((dependency = sched->ops->dependency(s_job))) {
-  fence_wait(dependency, false);
-  fence_put(dependency);
-   }
-   fence = sched->ops->run_job(s_job);
-   atomic_inc(&sched->hw_rq_count);
-   if (fence) {
-   s_fence->parent = fence_get(fence);
-   r = fence_add_callback(fence, &s_fence->cb,
-  amd_sched_process_job);
-   if (r == -ENOENT)
-   amd_sched_process_job(fence, &s_fence->cb);
-   else if (r)
-   DRM_ERROR("fence add callback failed (%d)\n",
- r);
-   fence_put(fence);
-   } else {
-   DRM_ERROR("Failed to run job!\n");
-   amd_sched_process_job(NULL, &s_fence->cb);
-   }
+   amd_sched_entity_push_job_recover(s_job);
spin_lock(&sched->job_list_lock);
}
spin_unlock(&sched->job_list_lock);
@@ -479,7 +463,7 @@ void amd_sched_entity_push_job(struct amd_sched_job 
*sched_job)
fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
   amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
-  amd_sched_entity_in(sched_job));
+  amd_sched_entity_in_or_recover(sched_job, false));
 }
 
 /* init a sched_job with basic field */
-- 
1.9.1



[PATCH 10/11] drm/amd: reset jobs to recover entity

2016-08-02 Thread Chunming Zhou
Remove the recover_entity from the recover_rq when resetting jobs, and add
the recover_entity back when recovering jobs.

Change-Id: Ic2e5cb6ab79d2abc49374e1770299487e327efe9
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 19 ++-
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 0444df0..191437c 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -417,9 +417,10 @@ static void amd_sched_job_timedout(struct work_struct 
*work)
job->sched->ops->timedout_job(job);
 }
 
+/* scheduler must be parked before job reset */
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 {
-   struct amd_sched_job *s_job;
+   struct amd_sched_job *s_job, *tmp;
 
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
@@ -429,14 +430,6 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler 
*sched)
}
}
atomic_set(&sched->hw_rq_count, 0);
-   spin_unlock(&sched->job_list_lock);
-}
-
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
-{
-   struct amd_sched_job *s_job, *tmp;
-
-   spin_lock(&sched->job_list_lock);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
list_del_init(&s_job->node);
spin_unlock(&sched->job_list_lock);
@@ -444,6 +437,14 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler 
*sched)
spin_lock(&sched->job_list_lock);
}
spin_unlock(&sched->job_list_lock);
+   amd_sched_rq_remove_entity(&sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+  &sched->recover_entity);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+   amd_sched_rq_add_entity(&sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+   &sched->recover_entity);
 }
 
 /**
-- 
1.9.1



[PATCH 11/11] drm/amdgpu: no need fence wait every time

2016-08-02 Thread Chunming Zhou
The recovery entities already handle every dependency properly, so the explicit
fence wait is no longer needed.

Change-Id: I70a8d0e2753741c4b54d9e01085d00dd708b5c80
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8b8d3db..a34d94a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -797,8 +797,6 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
 
 err:
amdgpu_bo_unreserve(vm->page_directory);
-   if (vm->recover_pt_fence)
-   r = fence_wait(vm->recover_pt_fence, false);
 
return r;
 }
-- 
1.9.1



[PATCH 06/11] drm/amdgpu: use all pte rings to recover page table

2016-08-02 Thread Chunming Zhou
Change-Id: Ic74508ec9de0bf1c027313ce9574e6cb8ea9bb1d
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 34 ++
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b470e5a..b7b4cf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2098,6 +2098,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
int i, r;
int resched;
bool need_full_reset;
+   u32 unpark_bits;
 
if (!amdgpu_check_soft_reset(adev)) {
DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
@@ -2119,6 +2120,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
amd_sched_hw_job_reset(&ring->sched);
amdgpu_ring_reset(ring);
}
+   unpark_bits = 0;
/* after all hw jobs are reset, hw fence is meaningless, so 
force_completion */
amdgpu_fence_driver_force_completion(adev);
/* store modesetting */
@@ -2162,8 +2164,6 @@ retry:
amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
-   struct amdgpu_ring *buffer_ring = adev->mman.buffer_funcs_ring;
-
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
if (r) {
@@ -2178,11 +2178,20 @@ retry:
 */
if (need_full_reset && !(adev->flags & AMD_IS_APU)) {
struct amdgpu_vm *vm, *tmp;
+   int i;
 
DRM_INFO("recover page table from shadow\n");
-   amd_sched_rq_block_entity(
-   
&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
-   kthread_unpark(buffer_ring->sched.thread);
+   for (i = 0; i < adev->vm_manager.vm_pte_num_rings; i++) 
{
+   struct amdgpu_ring *ring = 
adev->vm_manager.vm_pte_rings[i];
+
+   amd_sched_rq_block_entity(
+   
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL], true);
+   amd_sched_rq_block_entity(
+   
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
+   kthread_unpark(ring->sched.thread);
+   unpark_bits |= 1 << ring->idx;
+   }
+
spin_lock(&adev->vm_list_lock);
list_for_each_entry_safe(vm, tmp, &adev->vm_list, list) 
{
spin_unlock(&adev->vm_list_lock);
@@ -2190,8 +2199,15 @@ retry:
spin_lock(&adev->vm_list_lock);
}
spin_unlock(&adev->vm_list_lock);
-   amd_sched_rq_block_entity(
-   
&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+
+   for (i = 0; i < adev->vm_manager.vm_pte_num_rings; i++) 
{
+   struct amdgpu_ring *ring = 
adev->vm_manager.vm_pte_rings[i];
+
+   amd_sched_rq_block_entity(
+   
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL], false);
+   amd_sched_rq_block_entity(
+   
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+   }
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2199,9 +2215,9 @@ retry:
continue;
 
DRM_INFO("ring:%d recover jobs\n", ring->idx);
-   kthread_park(buffer_ring->sched.thread);
amd_sched_job_recovery(&ring->sched);
-   kthread_unpark(ring->sched.thread);
+   if (!((unpark_bits >> ring->idx) & 0x1))
+   kthread_unpark(ring->sched.thread);
}
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 05/11] drm/amdgpu: add vm recover entity

2016-08-02 Thread Chunming Zhou
Every VM uses its own recover entity to recover its page table from the shadow.
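
As a rough sketch of the run-queue idea this builds on (a userspace toy model, not the amd scheduler code; it assumes the RECOVER queue is checked ahead of the others, as this series arranges), jobs queued through recover entities are picked before normal submissions:

/* Toy model of priority run queues -- not the amd_gpu_scheduler code. */
#include <stdio.h>

enum prio { PRIO_RECOVER, PRIO_NORMAL, PRIO_NUM };

struct rq {
	const char *jobs[8];
	int head, count;
};

static struct rq rqs[PRIO_NUM];

static void push(enum prio p, const char *job)
{
	struct rq *q = &rqs[p];

	q->jobs[(q->head + q->count++) % 8] = job;
}

/* Pick from the highest-priority non-empty queue, i.e. RECOVER before NORMAL. */
static const char *pop_next(void)
{
	for (int p = 0; p < PRIO_NUM; p++) {
		struct rq *q = &rqs[p];

		if (q->count) {
			const char *job = q->jobs[q->head];

			q->head = (q->head + 1) % 8;
			q->count--;
			return job;
		}
	}
	return NULL;
}

int main(void)
{
	const char *job;

	push(PRIO_NORMAL,  "normal job A");
	push(PRIO_RECOVER, "vm0 page-table recovery");
	push(PRIO_NORMAL,  "normal job B");
	push(PRIO_RECOVER, "vm1 page-table recovery");

	while ((job = pop_next()))
		printf("run: %s\n", job);   /* recovery jobs come out first */
	return 0;
}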

Change-Id: I93e37666cb3fb511311c96ff172b6e9ebd337547
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22 +++---
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4889d13..b1a4af0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -917,7 +917,8 @@ struct amdgpu_vm {
 
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
-
+   struct amd_sched_entity recover_entity;
+   struct amdgpu_ring  *ring;
/* client id */
u64 client_id;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1d58577..8b8d3db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -714,13 +714,13 @@ error_free:
 }
 
 static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
+   struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
struct amdgpu_bo *bo_shadow,
struct reservation_object *resv,
struct fence **fence)
 
 {
-   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
int r;
uint64_t vram_addr, gtt_addr;
 
@@ -739,8 +739,8 @@ static int amdgpu_vm_recover_bo_from_shadow(struct 
amdgpu_device *adev,
if (r)
goto err3;
 
-   r = amdgpu_copy_buffer(ring, &adev->mman.entity, gtt_addr, vram_addr,
-  amdgpu_bo_size(bo), resv, fence);
+   r = amdgpu_copy_buffer(vm->ring, &vm->recover_entity, gtt_addr,
+  vram_addr, amdgpu_bo_size(bo), resv, fence);
if (!r)
amdgpu_bo_fence(bo, *fence, true);
 
@@ -767,7 +767,7 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
if (unlikely(r != 0))
return r;
 
-   r = amdgpu_vm_recover_bo_from_shadow(adev, vm->page_directory,
+   r = amdgpu_vm_recover_bo_from_shadow(adev, vm, vm->page_directory,
 vm->page_directory->shadow,
 NULL, &fence);
if (r) {
@@ -784,7 +784,7 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
 
if (!bo || !bo_shadow)
continue;
-   r = amdgpu_vm_recover_bo_from_shadow(adev, bo, bo_shadow,
+   r = amdgpu_vm_recover_bo_from_shadow(adev, vm, bo, bo_shadow,
 NULL, &fence);
if (r) {
DRM_ERROR("recover page table failed!\n");
@@ -1678,12 +1678,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
+   rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_RECOVER];
+   r = amd_sched_entity_init(&ring->sched, &vm->recover_entity,
+ rq, amdgpu_sched_jobs);
+   if (r)
+   goto err;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &vm->entity,
  rq, amdgpu_sched_jobs);
if (r)
-   goto err;
-
+   goto err1;
+   vm->ring = ring;
vm->page_directory_fence = NULL;
 
r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1725,6 +1730,8 @@ error_free_page_directory:
 error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err1:
+   amd_sched_entity_fini(&ring->sched, &vm->recover_entity);
 err:
drm_free_large(vm->page_tables);
 
@@ -1748,6 +1755,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
spin_lock(&adev->vm_list_lock);
list_del(&vm->list);
spin_unlock(&adev->vm_list_lock);
+   amd_sched_entity_fini(vm->entity.sched, &vm->recover_entity);
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
 
if (!RB_EMPTY_ROOT(&vm->va)) {
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 07/11] drm/amd: add recover entity for every scheduler

2016-08-02 Thread Chunming Zhou
It will be used to recover hw jobs.

Change-Id: I5508f5ffa04909b480ddd669dfb297e5059eba04
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24 
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
 2 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a15fd88..36f5805 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -635,7 +635,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
   const struct amd_sched_backend_ops *ops,
   unsigned hw_submission, long timeout, const char *name)
 {
-   int i;
+   int i, r;
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
@@ -648,22 +648,37 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
INIT_LIST_HEAD(&sched->ring_mirror_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
+   r = amd_sched_entity_init(sched, &sched->recover_entity,
+ &sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+ hw_submission);
+   if (r)
+   return r;
if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
sched_fence_slab = kmem_cache_create(
"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
-   if (!sched_fence_slab)
-   return -ENOMEM;
+   if (!sched_fence_slab) {
+   r = -ENOMEM;
+   goto err1;
+   }
}
 
/* Each scheduler will run on a seperate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
DRM_ERROR("Failed to create scheduler for %s.\n", name);
-   return PTR_ERR(sched->thread);
+   r = PTR_ERR(sched->thread);
+   goto err2;
}
 
return 0;
+err2:
+   if (atomic_dec_and_test(&sched_fence_slab_ref))
+   kmem_cache_destroy(sched_fence_slab);
+
+err1:
+   amd_sched_entity_fini(sched, &sched->recover_entity);
+   return r;
 }
 
 /**
@@ -677,4 +692,5 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
kthread_stop(sched->thread);
if (atomic_dec_and_test(&sched_fence_slab_ref))
kmem_cache_destroy(sched_fence_slab);
+   amd_sched_entity_fini(sched, &sched->recover_entity);
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index cd87bc7..8245316 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -133,6 +133,7 @@ struct amd_gpu_scheduler {
struct task_struct  *thread;
struct list_headring_mirror_list;
spinlock_t  job_list_lock;
+   struct amd_sched_entity recover_entity;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 09/11] drm/amd: hw job list should be exact

2016-08-02 Thread Chunming Zhou
The hw job list should be exact, so the job node should be deleted in the irq handler instead of in the work thread. Scheduling the timeout for the next job should happen immediately as well.

Change-Id: I6d2686d84be3e7077300df7181c2a284fbcda9eb
Signed-off-by: Chunming Zhou 
Reviewed-by: Edward O'Callaghan 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 38 +--
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9f4fa6e..0444df0 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -367,34 +367,32 @@ static void amd_sched_job_finish(struct work_struct *work)
struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
   finish_work);
struct amd_gpu_scheduler *sched = s_job->sched;
-   unsigned long flags;
-
-   /* remove job from ring_mirror_list */
-   spin_lock_irqsave(&sched->job_list_lock, flags);
-   list_del_init(&s_job->node);
-   if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-   struct amd_sched_job *next;
 
-   spin_unlock_irqrestore(&sched->job_list_lock, flags);
+   if (sched->timeout != MAX_SCHEDULE_TIMEOUT)
cancel_delayed_work_sync(&s_job->work_tdr);
-   spin_lock_irqsave(&sched->job_list_lock, flags);
-
-   /* queue TDR for next job */
-   next = list_first_entry_or_null(&sched->ring_mirror_list,
-   struct amd_sched_job, node);
 
-   if (next)
-   schedule_delayed_work(&next->work_tdr, sched->timeout);
-   }
-   spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched->ops->free_job(s_job);
 }
 
 static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
 {
-   struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
-finish_cb);
-   schedule_work(&job->finish_work);
+   struct amd_sched_job *s_job = container_of(cb, struct amd_sched_job,
+  finish_cb);
+   struct amd_gpu_scheduler *sched = s_job->sched;
+   struct amd_sched_job *next;
+   unsigned long flags;
+
+   /* remove job from ring_mirror_list */
+   spin_lock_irqsave(&sched->job_list_lock, flags);
+   list_del_init(&s_job->node);
+   /* queue TDR for next job */
+   next = list_first_entry_or_null(&sched->ring_mirror_list,
+   struct amd_sched_job, node);
+   spin_unlock_irqrestore(&sched->job_list_lock, flags);
+   if ((sched->timeout != MAX_SCHEDULE_TIMEOUT) && next)
+   schedule_delayed_work(&next->work_tdr, sched->timeout);
+
+   schedule_work(&s_job->finish_work);
 }
 
 static void amd_sched_job_begin(struct amd_sched_job *s_job)
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 04/10] drm/amdgpu: abstract amdgpu_bo_create_shadow

2016-08-02 Thread Chunming Zhou
Change-Id: Id0e89f350a05f8668ea00e3fff8c0bd6f3049cec
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 40 --
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  3 +++
 2 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e6ecf16..cc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -380,6 +380,32 @@ fail_free:
return r;
 }
 
+int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+   unsigned long size, int byte_align,
+   struct amdgpu_bo *bo)
+{
+   struct ttm_placement placement = {0};
+   struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+
+   if (bo->shadow)
+   return 0;
+
+   bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
+   memset(&placements, 0,
+  (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+   amdgpu_ttm_placement_init(adev, &placement,
+ placements, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+   return amdgpu_bo_create_restricted(adev, size, byte_align, true,
+  AMDGPU_GEM_DOMAIN_GTT,
+  AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+  NULL, &placement,
+  bo->tbo.resv,
+  &bo->shadow);
+}
+
 int amdgpu_bo_create(struct amdgpu_device *adev,
 unsigned long size, int byte_align,
 bool kernel, u32 domain, u64 flags,
@@ -404,19 +430,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
 
if (flags & AMDGPU_GEM_CREATE_SHADOW) {
-   memset(&placements, 0,
-  (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
-
-   amdgpu_ttm_placement_init(adev, &placement,
- placements, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC);
-
-   r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
-   AMDGPU_GEM_DOMAIN_GTT,
-   AMDGPU_GEM_CREATE_CPU_GTT_USWC,
-   NULL, &placement,
-   (*bo_ptr)->tbo.resv,
-   &(*bo_ptr)->shadow);
+   r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
if (r)
amdgpu_bo_unref(bo_ptr);
} else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index d650b42..b994fd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -117,6 +117,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
struct sg_table *sg,
struct reservation_object *resv,
struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+   unsigned long size, int byte_align,
+   struct amdgpu_bo *bo);
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 01/10] drm/amdgpu: make need_backup generic

2016-08-02 Thread Chunming Zhou
It will be used in other places.

Change-Id: I213faf16e25a95bef4c45a65ab21f4d61db4ef41
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h| 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b1a4af0..daf07ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -975,6 +975,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+bool amdgpu_vm_need_backup(struct amdgpu_device *adev);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 struct list_head *validated,
 struct amdgpu_bo_list_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a34d94a..01dd888 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -112,7 +112,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
list_add(&entry->tv.head, validated);
 }
 
-static bool amdgpu_vm_need_backup(struct amdgpu_device *adev)
+bool amdgpu_vm_need_backup(struct amdgpu_device *adev)
 {
if (adev->flags & AMD_IS_APU)
return false;
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 00/10] GART table recovery

2016-08-02 Thread Chunming Zhou
The gart table is stored in one bo which must be ready before gart init, but the shadow bo must be created after gart is ready, so they cannot be created at the same time. The shadow bo itself is also mapped in the gart table, so the shadow bo needs a synchronization after device init. After the sync, the contents of the bo and the shadow bo are the same and are updated at the same time. From then on we are able to recover the gart table from the shadow bo on a gpu full reset.
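
As a rough illustration of that scheme (a standalone userspace sketch, not amdgpu code; buffer names and sizes are invented): the primary table works before its shadow exists, a one-time copy brings the shadow up to date, later writes update both, and the primary is restored from the shadow after a simulated reset.

/* Conceptual model only -- plain userspace C, not amdgpu code. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ENTRIES 8

static uint64_t table[ENTRIES];   /* stands in for the gart table in VRAM */
static uint64_t shadow[ENTRIES];  /* stands in for the shadow copy in GTT */
static int shadow_ready;

static void set_entry(unsigned int i, uint64_t pte)
{
	table[i] = pte;               /* the primary table is always updated */
	if (shadow_ready)
		shadow[i] = pte;      /* mirrored only once the shadow exists */
}

int main(void)
{
	set_entry(0, 0x1000);         /* entries written before the shadow exists */
	set_entry(1, 0x2000);

	/* shadow created late: a one-time full copy catches it up */
	memcpy(shadow, table, sizeof(table));
	shadow_ready = 1;

	set_entry(2, 0x3000);         /* from now on both copies stay in lockstep */

	/* a simulated full reset wipes the primary table ... */
	memset(table, 0, sizeof(table));
	/* ... and recovery restores it from the shadow */
	memcpy(table, shadow, sizeof(shadow));

	printf("entry0=%#llx entry2=%#llx\n",
	       (unsigned long long)table[0], (unsigned long long)table[2]);
	return 0;
}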

Patch 10 is a fix for a memory leak.

Chunming Zhou (10):
  drm/amdgpu: make need_backup generic
  drm/amdgpu: implement gart late_init/fini
  drm/amdgpu: add gart_late_init/fini to gmc V7/8
  drm/amdgpu: abstract amdgpu_bo_create_shadow
  drm/amdgpu: shadow gart table support
  drm/amdgpu: make recover_bo_from_shadow be generic
  drm/amdgpu: implement gart recovery
  drm/amdgpu: recover gart table first when full reset
  drm/amdgpu: sync gart table before initialization completed
  drm/amdgpu: fix memory leak of sched fence

 drivers/gpu/drm/amd/amdgpu/amdgpu.h|   9 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 139 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c|   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  80 ++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |   9 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |  50 ++-
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  |  39 +++-
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  |  40 -
 9 files changed, 304 insertions(+), 66 deletions(-)

-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 07/10] drm/amdgpu: implement gart recovery

2016-08-02 Thread Chunming Zhou
recover gart bo from its shadow bo.

Change-Id: Idbb91d62b1c3cf73f7d90b5f2c650f2690e5a42b
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h  |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 31 +++
 2 files changed, 32 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2985578d..3ee01fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -667,6 +667,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, 
unsigned offset,
 int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
 int pages, struct page **pagelist,
 dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_gart_table_recover_from_shadow(struct amdgpu_device *adev);
 
 /*
  * GPU MC structures, functions & helpers
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index b306684..baeaee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -135,6 +135,37 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device 
*adev)
return 0;
 }
 
+int amdgpu_gart_table_recover_from_shadow(struct amdgpu_device *adev)
+{
+   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+   struct fence *fence;
+   int r;
+
+   if (!amdgpu_vm_need_backup(adev))
+   return 0;
+   /* bo and shadow use same resv, so reserve one time */
+   r = amdgpu_bo_reserve(adev->gart.robj, false);
+   if (unlikely(r != 0))
+   return r;
+
+   r = amdgpu_bo_recover_bo_from_shadow(adev, ring,
+&adev->gart.recover_entity,
+adev->gart.robj,
+NULL, &fence);
+   amdgpu_bo_unreserve(adev->gart.robj);
+   if (r) {
+   DRM_ERROR("recover page table failed!\n");
+   goto err;
+   }
+
+   if (fence)
+   r = fence_wait(fence, false);
+   fence_put(fence);
+
+err:
+   return r;
+}
+
 /**
  * amdgpu_gart_table_vram_pin - pin gart page table in vram
  *
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 06/10] drm/amdgpu: make recover_bo_from_shadow be generic

2016-08-02 Thread Chunming Zhou
Change-Id: I74758b9ca84058f3f2db5509822d8aad840d283e
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 40 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  6 
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 48 --
 3 files changed, 51 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cc9..1d0bdfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -439,6 +439,46 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
 }
 
+int amdgpu_bo_recover_bo_from_shadow(struct amdgpu_device *adev,
+struct amdgpu_ring *ring,
+struct amd_sched_entity *entity,
+struct amdgpu_bo *bo,
+struct reservation_object *resv,
+struct fence **fence)
+
+{
+   int r;
+   uint64_t vram_addr, gtt_addr;
+
+   r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+   if (r) {
+   DRM_ERROR("Failed to pin bo object\n");
+   goto err1;
+   }
+   r = amdgpu_bo_pin(bo->shadow, AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+   if (r) {
+   DRM_ERROR("Failed to pin bo shadow object\n");
+   goto err2;
+   }
+
+   r = reservation_object_reserve_shared(bo->tbo.resv);
+   if (r)
+   goto err3;
+
+   r = amdgpu_copy_buffer(ring, entity, gtt_addr,
+  vram_addr, amdgpu_bo_size(bo), resv, fence);
+   if (!r)
+   amdgpu_bo_fence(bo, *fence, true);
+
+err3:
+   amdgpu_bo_unpin(bo->shadow);
+err2:
+   amdgpu_bo_unpin(bo);
+err1:
+
+   return r;
+}
+
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
bool is_iomem;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index b994fd4..f35fd68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -158,6 +158,12 @@ int amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_recover_bo_from_shadow(struct amdgpu_device *adev,
+struct amdgpu_ring *ring,
+struct amd_sched_entity *entity,
+struct amdgpu_bo *bo,
+struct reservation_object *resv,
+struct fence **fence);
 
 /*
  * sub allocation
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 01dd888..3eecddc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -713,46 +713,6 @@ error_free:
return r;
 }
 
-static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
-   struct amdgpu_vm *vm,
-   struct amdgpu_bo *bo,
-   struct amdgpu_bo *bo_shadow,
-   struct reservation_object *resv,
-   struct fence **fence)
-
-{
-   int r;
-   uint64_t vram_addr, gtt_addr;
-
-   r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
-   if (r) {
-   DRM_ERROR("Failed to pin bo object\n");
-   goto err1;
-   }
-   r = amdgpu_bo_pin(bo_shadow, AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
-   if (r) {
-   DRM_ERROR("Failed to pin bo shadow object\n");
-   goto err2;
-   }
-
-   r = reservation_object_reserve_shared(bo->tbo.resv);
-   if (r)
-   goto err3;
-
-   r = amdgpu_copy_buffer(vm->ring, &vm->recover_entity, gtt_addr,
-  vram_addr, amdgpu_bo_size(bo), resv, fence);
-   if (!r)
-   amdgpu_bo_fence(bo, *fence, true);
-
-err3:
-   amdgpu_bo_unpin(bo_shadow);
-err2:
-   amdgpu_bo_unpin(bo);
-err1:
-
-   return r;
-}
-
 int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
 struct amdgpu_vm *vm)
 {
@@ -767,8 +727,9 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
if (unlikely(r != 0))
return r;
 
-   r = amdgpu_vm_recover_bo_from_shadow(adev, vm, vm->page_directory,
-vm->page_directory->shadow,
+   r = amdgpu_bo_recover_bo_from_shadow(adev, vm->ring,
+&vm->recover_entity,
+  

[PATCH 05/10] drm/amdgpu: shadow gart table support

2016-08-02 Thread Chunming Zhou
Allocate a gart shadow bo and use it to back up the gart table.

Change-Id: Ib2beae9cea1ad1314c57f0fcdcc254816f39b9b2
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h  |  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 47 +++-
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c| 15 ++
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c| 16 +++
 4 files changed, 80 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 419a33b..2985578d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -638,6 +638,7 @@ struct amdgpu_gart {
dma_addr_t  table_addr;
struct amdgpu_bo*robj;
void*ptr;
+   void*shadow_ptr;
unsignednum_gpu_pages;
unsignednum_cpu_pages;
unsignedtable_size;
@@ -655,6 +656,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device 
*adev);
 void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_shadow_pin(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_shadow_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
 int amdgpu_gart_late_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index c1f226b..b306684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -248,6 +248,9 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, 
unsigned offset,
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags);
+   if (amdgpu_vm_need_backup(adev) && 
adev->gart.robj->shadow)
+   amdgpu_gart_set_pte_pde(adev, 
adev->gart.shadow_ptr,
+   t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
@@ -293,6 +296,9 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned 
offset,
page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); 
j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, 
t, page_base, flags);
+   if (amdgpu_vm_need_backup(adev) && 
adev->gart.robj->shadow)
+   amdgpu_gart_set_pte_pde(adev, 
adev->gart.shadow_ptr,
+   t, page_base, 
flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
@@ -364,6 +370,46 @@ void amdgpu_gart_fini(struct amdgpu_device *adev)
amdgpu_dummy_page_fini(adev);
 }
 
+int amdgpu_gart_table_vram_shadow_pin(struct amdgpu_device *adev)
+{
+   uint64_t gpu_addr;
+   int r;
+
+   if (!adev->gart.robj->shadow)
+   return -EINVAL;
+
+   r = amdgpu_bo_reserve(adev->gart.robj->shadow, false);
+   if (unlikely(r != 0))
+   return r;
+   r = amdgpu_bo_pin(adev->gart.robj->shadow,
+   AMDGPU_GEM_DOMAIN_GTT, &gpu_addr);
+   if (r) {
+   amdgpu_bo_unreserve(adev->gart.robj->shadow);
+   return r;
+   }
+   r = amdgpu_bo_kmap(adev->gart.robj->shadow, &adev->gart.shadow_ptr);
+   if (r)
+   amdgpu_bo_unpin(adev->gart.robj->shadow);
+   amdgpu_bo_unreserve(adev->gart.robj->shadow);
+   return r;
+}
+
+void amdgpu_gart_table_vram_shadow_unpin(struct amdgpu_device *adev)
+{
+   int r;
+
+   if (adev->gart.robj->shadow == NULL)
+   return;
+
+   r = amdgpu_bo_reserve(adev->gart.robj->shadow, false);
+   if (likely(r == 0)) {
+   amdgpu_bo_kunmap(adev->gart.robj->shadow);
+   amdgpu_bo_unpin(adev->gart.robj->shadow);
+   amdgpu_bo_unreserve(adev->gart.robj->shadow);
+   adev->gart.shadow_ptr = NULL;
+   }
+}
+
 int amdgpu_gart_late_init(struct amdgpu_device *adev)
 {
struct amd_sched_rq *rq;
@@ -372,7 +418,6 @@ int amdgpu_gart_late_init(struct amdgpu_device *adev)
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_RECOVER];
return amd_sched_entity_init(&ring->sched, &adev->gart.recover_entity,
 rq, amdgpu_sched_jobs);
-
 }
 
 void amdgpu_gart_late_fini(struct amdgpu_dev

[PATCH 08/10] drm/amdgpu: recover gart table first when full reset

2016-08-02 Thread Chunming Zhou
Change-Id: Iad7a90646dbb5df930a8ba177ce6bdc48415ff7d
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b7b4cf8..16ba37d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2191,6 +2191,8 @@ retry:
kthread_unpark(ring->sched.thread);
unpark_bits |= 1 << ring->idx;
}
+   DRM_INFO("recover gart table first\n");
+   amdgpu_gart_table_recover_from_shadow(adev);
 
spin_lock(&adev->vm_list_lock);
list_for_each_entry_safe(vm, tmp, &adev->vm_list, list) 
{
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 03/10] drm/amdgpu: add gart_late_init/fini to gmc V7/8

2016-08-02 Thread Chunming Zhou
Change-Id: I47b132d1ac5ed57f5805f759d5698948c35721ba
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 24 
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 24 
 2 files changed, 40 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0b0f086..0771c04 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -887,11 +887,26 @@ static int gmc_v7_0_early_init(void *handle)
 static int gmc_v7_0_late_init(void *handle)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+   int r;
 
-   if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-   return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
-   else
-   return 0;
+   r = amdgpu_gart_late_init(adev);
+   if (r)
+   return r;
+
+   if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) {
+   r = amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+   if (r)
+   amdgpu_gart_late_fini(adev);
+   }
+
+   return r;
+}
+
+static void gmc_v7_0_late_fini(void *handle)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+   amdgpu_gart_late_fini(adev);
 }
 
 static int gmc_v7_0_sw_init(void *handle)
@@ -1242,6 +1257,7 @@ const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
+   .late_fini = gmc_v7_0_late_fini,
.sw_init = gmc_v7_0_sw_init,
.sw_fini = gmc_v7_0_sw_fini,
.hw_init = gmc_v7_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 0a23b83..c26bee9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -876,11 +876,26 @@ static int gmc_v8_0_early_init(void *handle)
 static int gmc_v8_0_late_init(void *handle)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+   int r;
 
-   if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-   return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
-   else
-   return 0;
+   r = amdgpu_gart_late_init(adev);
+   if (r)
+   return r;
+
+   if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) {
+   r = amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+   if (r)
+   amdgpu_gart_late_fini(adev);
+   }
+
+   return r;
+}
+
+static void gmc_v8_0_late_fini(void *handle)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+   amdgpu_gart_late_fini(adev);
 }
 
 #define mmMC_SEQ_MISC0_FIJI 0xA71
@@ -1434,6 +1449,7 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
+   .late_fini = gmc_v8_0_late_fini,
.sw_init = gmc_v8_0_sw_init,
.sw_fini = gmc_v8_0_sw_fini,
.hw_init = gmc_v8_0_hw_init,
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 09/10] drm/amdgpu: sync gart table before initialization completed

2016-08-02 Thread Chunming Zhou
Since the shadow is in GTT, the shadow's own gart entries are not contained in the shadow, so we need to do a sync before initialization is completed.

Change-Id: I29b433da6c71fc790a32ef202dd85a72ab6b5787
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h  |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 47 +++-
 2 files changed, 47 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3ee01fe..4cad4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -648,6 +648,7 @@ struct amdgpu_gart {
boolready;
const struct amdgpu_gart_funcs *gart_funcs;
struct amd_sched_entity recover_entity;
+   u64 shadow_gpu_addr;
 };
 
 int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index baeaee2..e99c8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -421,10 +421,46 @@ int amdgpu_gart_table_vram_shadow_pin(struct 
amdgpu_device *adev)
r = amdgpu_bo_kmap(adev->gart.robj->shadow, &adev->gart.shadow_ptr);
if (r)
amdgpu_bo_unpin(adev->gart.robj->shadow);
+   adev->gart.shadow_gpu_addr = gpu_addr;
amdgpu_bo_unreserve(adev->gart.robj->shadow);
return r;
 }
 
+/* Since the shadow is in GTT, the shadow's own pte isn't in the shadow,
+   we need to do a sync before initialization is completed */
+static int amdgpu_gart_table_shadow_sync(struct amdgpu_device *adev)
+{
+   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+   struct amd_sched_entity *entity = &adev->gart.recover_entity;
+   struct fence *fence;
+   u64 vram_addr = adev->gart.table_addr;
+   u64 shadow_addr = adev->gart.shadow_gpu_addr;
+   int r;
+
+   if (!adev->gart.ready) {
+   DRM_ERROR("cannot sync gart table for shadow.\n");
+   return -EINVAL;
+   }
+   if (!amdgpu_vm_need_backup(adev) || !adev->gart.robj ||
+   !adev->gart.robj->shadow)
+   return 0;
+   r = amdgpu_bo_reserve(adev->gart.robj->shadow, false);
+   if (unlikely(r != 0))
+   return r;
+   /* if adev->gart.ready, means both gart bo and shadow bo are pinned */
+   r = amdgpu_copy_buffer(ring, entity, vram_addr,
+  shadow_addr, amdgpu_bo_size(adev->gart.robj),
+  adev->gart.robj->tbo.resv, &fence);
+   if (!r)
+   amdgpu_bo_fence(adev->gart.robj, fence, true);
+
+   amdgpu_bo_unreserve(adev->gart.robj->shadow);
+   r = fence_wait(fence, true);
+   fence_put(fence);
+
+   return r;
+}
+
 void amdgpu_gart_table_vram_shadow_unpin(struct amdgpu_device *adev)
 {
int r;
@@ -445,10 +481,19 @@ int amdgpu_gart_late_init(struct amdgpu_device *adev)
 {
struct amd_sched_rq *rq;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+   int r;
 
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_RECOVER];
-   return amd_sched_entity_init(&ring->sched, &adev->gart.recover_entity,
+   r = amd_sched_entity_init(&ring->sched, &adev->gart.recover_entity,
 rq, amdgpu_sched_jobs);
+   if (r)
+   return r;
+   r = amdgpu_gart_table_shadow_sync(adev);
+   if (r) {
+   DRM_ERROR("sync gart table failed (%d).\n", r);
+   amd_sched_entity_fini(&ring->sched, &adev->gart.recover_entity);
+   }
+   return r;
 }
 
 void amdgpu_gart_late_fini(struct amdgpu_device *adev)
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/10] drm/amdgpu: implement gart late_init/fini

2016-08-02 Thread Chunming Zhou
add recovery entity to gart.

Change-Id: Ieb400c8a731ef25619ea3c0b5198a6e7ce56580e
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h  |  3 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 18 ++
 2 files changed, 21 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index daf07ff..419a33b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -646,6 +646,7 @@ struct amdgpu_gart {
 #endif
boolready;
const struct amdgpu_gart_funcs *gart_funcs;
+   struct amd_sched_entity recover_entity;
 };
 
 int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
@@ -656,6 +657,8 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
+int amdgpu_gart_late_init(struct amdgpu_device *adev);
+void amdgpu_gart_late_fini(struct amdgpu_device *adev);
 void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
int pages);
 int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 921bce2..c1f226b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -363,3 +363,21 @@ void amdgpu_gart_fini(struct amdgpu_device *adev)
 #endif
amdgpu_dummy_page_fini(adev);
 }
+
+int amdgpu_gart_late_init(struct amdgpu_device *adev)
+{
+   struct amd_sched_rq *rq;
+   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+   rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_RECOVER];
+   return amd_sched_entity_init(&ring->sched, &adev->gart.recover_entity,
+rq, amdgpu_sched_jobs);
+
+}
+
+void amdgpu_gart_late_fini(struct amdgpu_device *adev)
+{
+   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+   amd_sched_entity_fini(&ring->sched, &adev->gart.recover_entity);
+}
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 10/10] drm/amdgpu: fix memory leak of sched fence

2016-08-02 Thread Chunming Zhou
amdgpu_job_free_resources is already called by submit_job.
If it is called again in run_job, the sched fence can end up being grabbed twice in the SA bo free path, and a memory leak happens.
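
As a minimal illustration of this kind of leak (a userspace toy, not the amdgpu fence or SA manager code): a reference taken by a duplicated call is never balanced by a put, so the refcount never reaches zero and the object is never freed.

/* Toy reference-count model -- the call names are placeholders. */
#include <stdio.h>
#include <stdlib.h>

struct obj { int refcount; };

static struct obj *obj_create(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;
	return o;
}

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		free(o);
		printf("object freed\n");
		return;
	}
	printf("refcount now %d\n", o->refcount);
}

/* Stands in for a bookkeeping step that is supposed to pin the object once. */
static void free_resources(struct obj *fence)
{
	obj_get(fence);
}

int main(void)
{
	struct obj *fence = obj_create();  /* refcount 1 */

	free_resources(fence);             /* expected call: refcount 2 */
	free_resources(fence);             /* duplicated call: refcount 3 */

	obj_put(fence);                    /* put matching the expected get */
	obj_put(fence);                    /* final put of the creation reference */

	/* refcount is still 1 here: the object is leaked */
	return 0;
}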

Change-Id: I833612e31cf22b62174f3f76546fd11c9ea38780
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8d87a9a..d56247d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -183,7 +183,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job 
*sched_job)
/* if gpu reset, hw fence will be replaced here */
fence_put(job->fence);
job->fence = fence_get(fence);
-   amdgpu_job_free_resources(job);
+
return fence;
 }
 
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH xf86-video-ati] Keep track of damage event related flushes per-client v2

2016-08-02 Thread Michel Dänzer
From: Michel Dänzer 

This further reduces the compositing slowdown due to flushing overhead,
by only flushing when the X server actually sends XDamageNotify events
to a client, and there hasn't been a flush yet in the meantime.
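
A small standalone sketch of that bookkeeping (not the X server/radeon code; reduced to plain counters and two clients, with the helper names borrowed from the patch): each client keeps its own needs_flush counter, and a flush is only required for a client whose counter has moved ahead of the global gpu_flushed counter.

/* Simplified model of per-client flush tracking. */
#include <stdio.h>
#include <stdint.h>

struct client { uint_fast32_t needs_flush; };

static uint_fast32_t gpu_flushed;

static int callback_needs_flush(const struct client *c)
{
	return (int)(c->needs_flush - gpu_flushed) > 0;
}

static void damage_event_sent(struct client *c)
{
	/* don't let gpu_flushed run far ahead, then record one pending event */
	c->needs_flush = gpu_flushed;
	c->needs_flush++;
}

static void gpu_flush(void)
{
	gpu_flushed++;
}

int main(void)
{
	struct client a = {0}, b = {0};

	damage_event_sent(&a);
	printf("a:%d b:%d\n", callback_needs_flush(&a), callback_needs_flush(&b)); /* 1 0 */

	gpu_flush();
	printf("a:%d b:%d\n", callback_needs_flush(&a), callback_needs_flush(&b)); /* 0 0 */
	return 0;
}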

v2: Use ScreenPrivateKey, fixes invalid memory access with GPU screens
Signed-off-by: Michel Dänzer 
---
 src/radeon.h |  5 -
 src/radeon_kms.c | 41 +
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/src/radeon.h b/src/radeon.h
index 25ff61c..f3a3e1c 100644
--- a/src/radeon.h
+++ b/src/radeon.h
@@ -448,6 +448,10 @@ struct radeon_accel_state {
 Bool  force;
 };
 
+struct radeon_client_priv {
+uint_fast32_t needs_flush;
+};
+
 typedef struct {
 EntityInfoPtr pEnt;
 pciVideoPtr   PciInfo;
@@ -474,7 +478,6 @@ typedef struct {
 Bool  allowColorTiling;
 Bool  allowColorTiling2D;
 int   callback_event_type;
-uint_fast32_t callback_needs_flush;
 uint_fast32_t gpu_flushed;
 uint_fast32_t gpu_synced;
 struct radeon_accel_state *accel_state;
diff --git a/src/radeon_kms.c b/src/radeon_kms.c
index da11358..f525a2c 100644
--- a/src/radeon_kms.c
+++ b/src/radeon_kms.c
@@ -40,6 +40,7 @@
 
 #include "radeon_version.h"
 #include "shadow.h"
+#include 
 
 #include "atipciids.h"
 
@@ -59,6 +60,8 @@
 #include "radeon_cs_gem.h"
 #include "radeon_vbo.h"
 
+static DevScreenPrivateKeyRec radeon_client_private_key;
+
 extern SymTabRec RADEONChipsets[];
 static Bool radeon_setup_kernel_mem(ScreenPtr pScreen);
 
@@ -241,9 +244,9 @@ radeonUpdatePacked(ScreenPtr pScreen, shadowBufPtr pBuf)
 }
 
 static Bool
-callback_needs_flush(RADEONInfoPtr info)
+callback_needs_flush(RADEONInfoPtr info, struct radeon_client_priv 
*client_priv)
 {
-return (int)(info->callback_needs_flush - info->gpu_flushed) > 0;
+return (int)(client_priv->needs_flush - info->gpu_flushed) > 0;
 }
 
 static void
@@ -252,20 +255,30 @@ radeon_event_callback(CallbackListPtr *list,
 {
 EventInfoRec *eventinfo = call_data;
 ScrnInfoPtr pScrn = user_data;
+ScreenPtr pScreen = pScrn->pScreen;
+struct radeon_client_priv *client_priv =
+   dixLookupScreenPrivate(&eventinfo->client->devPrivates,
+  &radeon_client_private_key, pScreen);
+struct radeon_client_priv *server_priv =
+   dixLookupScreenPrivate(&serverClient->devPrivates,
+  &radeon_client_private_key, pScreen);
 RADEONInfoPtr info = RADEONPTR(pScrn);
 int i;
 
-if (callback_needs_flush(info))
+if (callback_needs_flush(info, client_priv) ||
+   callback_needs_flush(info, server_priv))
return;
 
-/* Don't let gpu_flushed get too far ahead of callback_needs_flush,
- * in order to prevent false positives in callback_needs_flush()
+/* Don't let gpu_flushed get too far ahead of needs_flush, in order
+ * to prevent false positives in callback_needs_flush()
  */
-info->callback_needs_flush = info->gpu_flushed;
+client_priv->needs_flush = info->gpu_flushed;
+server_priv->needs_flush = info->gpu_flushed;
 
 for (i = 0; i < eventinfo->count; i++) {
if (eventinfo->events[i].u.u.type == info->callback_event_type) {
-   info->callback_needs_flush++;
+   client_priv->needs_flush++;
+   server_priv->needs_flush++;
return;
}
 }
@@ -276,9 +289,14 @@ radeon_flush_callback(CallbackListPtr *list,
  pointer user_data, pointer call_data)
 {
 ScrnInfoPtr pScrn = user_data;
+ScreenPtr pScreen = pScrn->pScreen;
+ClientPtr client = call_data ? call_data : serverClient;
+struct radeon_client_priv *client_priv =
+   dixLookupScreenPrivate(&client->devPrivates,
+  &radeon_client_private_key, pScreen);
 RADEONInfoPtr info = RADEONPTR(pScrn);
 
-if (pScrn->vtSema && callback_needs_flush(info))
+if (pScrn->vtSema && callback_needs_flush(info, client_priv))
 radeon_cs_flush_indirect(pScrn);
 }
 
@@ -351,6 +369,13 @@ static Bool RADEONCreateScreenResources_KMS(ScreenPtr 
pScreen)
DeleteCallback(&FlushCallback, radeon_flush_callback, pScrn);
return FALSE;
}
+
+   if (!dixRegisterScreenPrivateKey(&radeon_client_private_key, pScreen,
+PRIVATE_CLIENT, sizeof(struct 
radeon_client_priv))) {
+   DeleteCallback(&FlushCallback, radeon_flush_callback, pScrn);
+   DeleteCallback(&EventCallback, radeon_event_callback, pScrn);
+   return FALSE;
+   }
 }
 
 return TRUE;
-- 
2.8.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


cz_clockpowergating.c powerplay unused functions

2016-08-02 Thread StDenis, Tom
Are there plans for the functions *_phm_*() at the top of 
cz_clockpowergating.c?  They seem to be copy/pasted from other drivers and 
likewise are not hooked up to anything (nor complete).


Tom
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 3/3] drm/amdgpu: update golden setting of polaris10

2016-08-02 Thread Wang, Ken
Reviewed-by: Ken Wang 


From: Huang Rui 
Sent: August 2, 2016 13:23:34
To: amd-gfx@lists.freedesktop.org; Deucher, Alexander
Cc: Wang, Ken; Huan, Alvin; Huang, Ray
Subject: [PATCH v2 3/3] drm/amdgpu: update golden setting of polaris10

Signed-off-by: Huang Rui 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ea025a5..05c336b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -301,8 +301,8 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
 mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
-   mmCB_HW_CONTROL, 0xfffdf3cf, 0x7208,
-   mmCB_HW_CONTROL_2, 0, 0x0f00,
+   mmCB_HW_CONTROL, 0x0001f3cf, 0x7208,
+   mmCB_HW_CONTROL_2, 0x0f00, 0x0f00,
 mmCB_HW_CONTROL_3, 0x01ff, 0x0040,
 mmDB_DEBUG2, 0xf00f, 0x0400,
 mmPA_SC_ENHANCE, 0x, 0x2001,
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 2/3] drm/amdgpu: update golden setting of polaris11

2016-08-02 Thread Wang, Ken
Reviewed-by: Ken Wang 


From: Huang Rui 
Sent: August 2, 2016 13:23:33
To: amd-gfx@lists.freedesktop.org; Deucher, Alexander
Cc: Wang, Ken; Huan, Alvin; Huang, Ray
Subject: [PATCH v2 2/3] drm/amdgpu: update golden setting of polaris11

Signed-off-by: Huang Rui 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc4d998..ea025a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -269,7 +269,8 @@ static const u32 tonga_mgcg_cgcg_init[] =

 static const u32 golden_settings_polaris11_a11[] =
 {
-   mmCB_HW_CONTROL, 0xfffdf3cf, 0x6208,
+   mmCB_HW_CONTROL, 0xf3cf, 0x7208,
+   mmCB_HW_CONTROL_2, 0x0f00, 0x0f00,
 mmCB_HW_CONTROL_3, 0x01ff, 0x0040,
 mmDB_DEBUG2, 0xf00f, 0x0400,
 mmPA_SC_ENHANCE, 0x, 0x2001,
@@ -278,7 +279,7 @@ static const u32 golden_settings_polaris11_a11[] =
 mmPA_SC_RASTER_CONFIG_1, 0x003f, 0x,
 mmRLC_CGCG_CGLS_CTRL, 0x0003, 0x0001003c,
 mmRLC_CGCG_CGLS_CTRL_3D, 0x, 0x0001003c,
-   mmSQ_CONFIG, 0x07f8, 0x0718,
+   mmSQ_CONFIG, 0x07f8, 0x0118,
 mmTA_CNTL_AUX, 0x000f000f, 0x000b,
 mmTCC_CTRL, 0x0010, 0xf31fff7f,
 mmTCP_ADDR_CONFIG, 0x03ff, 0x00f3,
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 1/3] drm/amdgpu: update golden setting of tonga

2016-08-02 Thread Wang, Ken
Reviewed-by: Ken Wang 


From: Huang Rui 
Sent: August 2, 2016 13:23:32
To: amd-gfx@lists.freedesktop.org; Deucher, Alexander
Cc: Wang, Ken; Huan, Alvin; Huang, Ray
Subject: [PATCH v2 1/3] drm/amdgpu: update golden setting of tonga

Signed-off-by: Huang Rui 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index aff318a..fc4d998 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -165,6 +165,7 @@ static const u32 golden_settings_tonga_a11[] =
 mmPA_SC_ENHANCE, 0x, 0x2001,
 mmPA_SC_FIFO_DEPTH_CNTL, 0x03ff, 0x00fc,
 mmPA_SC_LINE_STIPPLE_STATE, 0xff0f, 0x,
+   mmRLC_CGCG_CGLS_CTRL, 0x0003, 0x003c,
 mmSQ_RANDOM_WAVE_PRI, 0x001f, 0x06fd,
 mmTA_CNTL_AUX, 0x000f000f, 0x000b,
 mmTCC_CTRL, 0x0010, 0xf31fff7f,
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/2] drm/amdgpu: use the kernel zone memory size as the max remote memory in amdgpu

2016-08-02 Thread Wang, Ken
Yeah, that could be an issue. I have changed the logic and sent it out for review again.


From: Alex Deucher 
Sent: August 2, 2016 12:56:07
To: Wang, Ken
Cc: amd-gfx list
Subject: Re: [PATCH 2/2] drm/amdgpu: use the kernel zone memory size as the max remote memory in amdgpu

On Wed, Jul 27, 2016 at 7:21 AM, Ken Wang  wrote:
> Change-Id: Ibf193cc2d9e20c3aefa1ce8ff24241dfbb6768ff
> Signed-off-by: Ken Wang 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  2 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++-
>  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c   |  7 ++-
>  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c   |  7 ++-
>  4 files changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 8bef7ec..b84153f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -2432,6 +2432,8 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device 
> *adev, struct ttm_tt *ttm,
>  void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, 
> u64 base);
>  void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>  void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
> +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
> +int amdgpu_ttm_global_init(struct amdgpu_device *adev);
>  void amdgpu_program_register_sequence(struct amdgpu_device *adev,
>  const u32 *registers,
>  const u32 array_size);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 145732a..5200381 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -34,6 +34,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct 
> drm_global_reference *ref)
> ttm_mem_global_release(ref->object);
>  }
>
> -static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
> +int amdgpu_ttm_global_init(struct amdgpu_device *adev)
>  {
> struct drm_global_reference *global_ref;
> struct amdgpu_ring *ring;
> @@ -998,10 +999,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
> unsigned i, j;
> int r;
>
> -   r = amdgpu_ttm_global_init(adev);
> -   if (r) {
> -   return r;
> -   }
> /* No others user of address space so set it to 0 */
> r = ttm_bo_device_init(&adev->mman.bdev,
>adev->mman.bo_global_ref.ref.object,
> @@ -1398,3 +1395,8 @@ static void amdgpu_ttm_debugfs_fini(struct 
> amdgpu_device *adev)
>
>  #endif
>  }
> +
> +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
> +{
> +   return 
> ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
> +}
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> index a3b6048..b6ae925 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> @@ -392,7 +392,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
>  * size equal to the 1024 or vram, whichever is larger.
>  */
> if (amdgpu_gart_size == -1)
> -   adev->mc.gtt_size = max((1024ULL << 20), 
> adev->mc.mc_vram_size);
> +   adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);

Do we want to limit this to the min(amdgpu_ttm_get_gtt_mem_size, GPU
address space - vram)?  GPU address space is only 40 bits.  You could
theoretically have systems with a lot of system memory.

Alex
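
For illustration, here is the suggested clamp with made-up sizes (plain userspace C, not driver code; min_u64 is a local helper and the 40-bit address space figure comes from the comment above):

/* Illustrative arithmetic only: GTT limited by both the kernel zone size
 * and the GPU address space left after VRAM.  All sizes are invented. */
#include <stdio.h>
#include <stdint.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t gpu_address_space = 1ULL << 40;    /* 40-bit GPU address space */
	uint64_t vram_size         = 8ULL << 30;    /* assumed 8 GB VRAM */
	uint64_t kernel_zone_size  = 2048ULL << 30; /* assumed 2 TB of system memory */

	uint64_t gtt_size = min_u64(gpu_address_space - vram_size,
				    kernel_zone_size);

	printf("gtt_size = %llu GB (clamped by the GPU address space)\n",
	       (unsigned long long)(gtt_size >> 30));
	return 0;
}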


> else
> adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
>
> @@ -959,6 +959,11 @@ static int gmc_v7_0_sw_init(void *handle)
> return r;
> }
>
> +   r = amdgpu_ttm_global_init(adev);
> +   if (r) {
> +   return r;
> +   }
> +
> r = gmc_v7_0_mc_init(adev);
> if (r)
> return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> index 02b6872..d46d76b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> @@ -469,7 +469,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
>  * size equal to the 1024 or vram, whichever is larger.
>  */
> if (amdgpu_gart_size == -1)
> -   adev->mc.gtt_size = max((1024ULL << 20), 
> adev->mc.mc_vram_size);
> +   adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
> else
> adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
>
> @@ -956,6 +956,11 @@ static int gmc_v8_0_sw_init(void *handle)
> return r;
> }
>
> +   r = amdgpu_ttm_global_init(adev);
> +   if (r) {
> + 

Re: [PATCH 2/2] drm/amdgpu: use the kernel zone memory size as the max remote memory in amdgpu

2016-08-02 Thread Alex Deucher
On Tue, Aug 2, 2016 at 1:19 AM, Ken Wang  wrote:
> Change-Id: Ibf193cc2d9e20c3aefa1ce8ff24241dfbb6768ff
> Signed-off-by: Ken Wang 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  2 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++-
>  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c   |  7 ++-
>  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c   |  7 ++-
>  4 files changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index da9ee5e..a701b79 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -2450,6 +2450,8 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device 
> *adev, struct ttm_tt *ttm,
>  void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, 
> u64 base);
>  void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>  void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
> +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
> +int amdgpu_ttm_global_init(struct amdgpu_device *adev);
>  void amdgpu_program_register_sequence(struct amdgpu_device *adev,
>  const u32 *registers,
>  const u32 array_size);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index d739ecb..4b31a36 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -34,6 +34,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct 
> drm_global_reference *ref)
> ttm_mem_global_release(ref->object);
>  }
>
> -static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
> +int amdgpu_ttm_global_init(struct amdgpu_device *adev)
>  {
> struct drm_global_reference *global_ref;
> struct amdgpu_ring *ring;
> @@ -1003,10 +1004,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
> unsigned i, j;
> int r;
>
> -   r = amdgpu_ttm_global_init(adev);
> -   if (r) {
> -   return r;
> -   }
> /* No others user of address space so set it to 0 */
> r = ttm_bo_device_init(&adev->mman.bdev,
>adev->mman.bo_global_ref.ref.object,
> @@ -1403,3 +1400,8 @@ static void amdgpu_ttm_debugfs_fini(struct 
> amdgpu_device *adev)
>
>  #endif
>  }
> +
> +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
> +{
> +   return 
> ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
> +}
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> index a3b6048..4d4b57f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> @@ -392,7 +392,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
>  * size equal to the 1024 or vram, whichever is larger.
>  */
> if (amdgpu_gart_size == -1)
> -   adev->mc.gtt_size = max((1024ULL << 20), 
> adev->mc.mc_vram_size);
> +   adev->mc.gtt_size = min(adev->mc.mc_vram_size, 
> amdgpu_ttm_get_gtt_mem_size(adev));


Shouldn't this be:
adev->mc.gtt_size = min(adev->mc.mc_mask - adev->mc.mc_vram_size,
amdgpu_ttm_get_gtt_mem_size(adev));

Looking closer at the code, amdgpu_gtt_location() will automatically
clamp the gtt size to the available address space to I think the
previous patch is fine as is.  With mc.gtt_size set to
amdgpu_ttm_get_gtt_mem_size(), the previous patch is:
Reviewed-by: Alex Deucher 

> else
> adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
>
> @@ -959,6 +959,11 @@ static int gmc_v7_0_sw_init(void *handle)
> return r;
> }
>
> +   r = amdgpu_ttm_global_init(adev);
> +   if (r) {
> +   return r;
> +   }
> +
> r = gmc_v7_0_mc_init(adev);
> if (r)
> return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> index 84d4f7f..30b3fd8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> @@ -469,7 +469,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
>  * size equal to the 1024 or vram, whichever is larger.
>  */
> if (amdgpu_gart_size == -1)
> -   adev->mc.gtt_size = max((1024ULL << 20), 
> adev->mc.mc_vram_size);
> +   adev->mc.gtt_size = min(adev->mc.mc_vram_size, 
> amdgpu_ttm_get_gtt_mem_size(adev));
> else
> adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
>
> @@ -956,6 +956,11 @@ static int gmc_v8_0_sw_init(void *handle)
> return r;
> }
>
> +   r = amdgpu_ttm_global_init(adev);
> +   if (r) {
> +   return r;
> +   }
> +
> 

RE: [PATCH] Add freesync ioctl interface

2016-08-02 Thread Deucher, Alexander
> -Original Message-
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
> Of Zhang, Hawking
> Sent: Tuesday, August 02, 2016 12:14 AM
> To: Michel Dänzer
> Cc: amd-gfx@lists.freedesktop.org
> Subject: RE: [PATCH] Add freesync ioctl interface
> 
> The kernel branch has already merged the ioctl implementation. I'm okay to
> keep it internal, though.

The kernel bits are in dal.  You can see them publically in my public mirror of 
amd-staging-4.6 IIRC.

Alex

> 
> 
> Regards,
> Hawking
> 
> -Original Message-
> From: Michel Dänzer [mailto:mic...@daenzer.net]
> Sent: Tuesday, August 02, 2016 11:32
> To: Zhang, Hawking 
> Cc: amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] Add freesync ioctl interface
> 
> On 02.08.2016 12:12, Zhang, Hawking wrote:
> > The implementation is as [PATCH] Enable/disable freesync when
> > enter/exit fullscreen game.
> 
> I mean an implementation of the ioctl in the kernel driver.
> 
> 
> P.S. Please run the following command in each Git repository, so that the
> [PATCH] prefix includes which repository a patch is for:
> 
>  git config format.subjectprefix "PATCH $(basename $PWD)"
> 
> --
> Earthling Michel Dänzer   |   http://www.amd.com
> Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 00/10] GART table recovery

2016-08-02 Thread Christian König

Well you have been hardworking during my vacation :)

Looks pretty good to me, but I hope that I can get a closer look tomorrow.

Is there any particular order the three sets must be applied?

Regards,
Christian.

On 02.08.2016 at 10:00, Chunming Zhou wrote:

The gart table is stored in one bo, which must be ready before gart init, but 
the shadow bo must be created after gart is ready, so they cannot be created 
at the same time. The shadow bo itself is also included in the gart table, so 
the shadow bo needs a synchronization after device init. After the sync, the 
contents of the bo and its shadow will be the same and will be updated at the 
same time. Then we will be able to recover the gart table from the shadow bo 
when a full gpu reset happens.
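
To make the mirroring idea concrete, here is a minimal userspace-style sketch
(illustrative only, not driver code): every table update is written to both
copies, and recovery after a reset is just a copy back from the shadow.

#include <stdint.h>
#include <string.h>

/*
 * Userspace-style illustration of the mirroring described above, not
 * driver code: every PTE write goes to both the "vram" copy and its
 * GTT shadow, so the vram copy can be rebuilt after a GPU reset.
 */
struct mirrored_table {
	uint64_t *vram;    /* stands in for the GART table bo in VRAM */
	uint64_t *shadow;  /* stands in for the shadow bo in GTT */
	size_t    entries;
};

static void table_set_entry(struct mirrored_table *t, size_t idx, uint64_t pte)
{
	t->vram[idx] = pte;    /* normal update path */
	t->shadow[idx] = pte;  /* keep the shadow in sync at the same time */
}

static void table_recover(struct mirrored_table *t)
{
	/* VRAM contents are lost across a full reset; copy them back. */
	memcpy(t->vram, t->shadow, t->entries * sizeof(uint64_t));
}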

Patch 10 is a fix for a memory leak.

Chunming Zhou (10):
   drm/amdgpu: make need_backup generic
   drm/amdgpu: implement gart late_init/fini
   drm/amdgpu: add gart_late_init/fini to gmc V7/8
   drm/amdgpu: abstract amdgpu_bo_create_shadow
   drm/amdgpu: shadow gart table support
   drm/amdgpu: make recover_bo_from_shadow be generic
   drm/amdgpu: implement gart recovery
   drm/amdgpu: recover gart table first when full reset
   drm/amdgpu: sync gart table before initialization completed
   drm/amdgpu: fix memory leak of sched fence

  drivers/gpu/drm/amd/amdgpu/amdgpu.h|   9 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   2 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 139 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c|   2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  80 ++---
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |   9 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |  50 ++-
  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  |  39 +++-
  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  |  40 -
  9 files changed, 304 insertions(+), 66 deletions(-)



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/2] drm/ttm: add interface to export kernel_zone max memory size in ttm

2016-08-02 Thread Christian König

On 02.08.2016 at 06:57, Alex Deucher wrote:

On Wed, Jul 27, 2016 at 7:21 AM, Ken Wang  wrote:

Change-Id: I9f30b54365492b234a7f0887dd0c67a9817c3705
Signed-off-by: Ken Wang 

Adding dri-devel as well since ttm is a common component.

Reviewed-by: Alex Deucher 


A bit more of a commit message would be nice, e.g. something like "Add a 
function to get the maximum memory used by the kernel zone."


With that fixed the patch is Reviewed-by: Christian König  as well.


Regards,
Christian.




---
  drivers/gpu/drm/ttm/ttm_memory.c | 6 ++
  include/drm/ttm/ttm_memory.h | 1 +
  2 files changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index a1803fb..29855be 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -600,3 +600,9 @@ size_t ttm_round_pot(size_t size)
 return 0;
  }
  EXPORT_SYMBOL(ttm_round_pot);
+
+uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
+{
+   return glob->zone_kernel->max_mem;
+}
+EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 72dcbe8..c452089 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -155,4 +155,5 @@ extern int ttm_mem_global_alloc_page(struct ttm_mem_global 
*glob,
  extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
  struct page *page);
  extern size_t ttm_round_pot(size_t size);
+extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
  #endif
--
2.7.4
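
For context, a hedged sketch of how a driver ends up consuming the new helper,
mirroring the gmc_v7/v8 hunks quoted earlier in the thread; this fragment is
not taken from the patch, and the amdgpu wrapper and field names are assumed
from those earlier patches:

/*
 * Hedged fragment, not taken from this patch: how a GMC block could use
 * the new helper to pick its default GTT size.  The wrapper and field
 * names come from the amdgpu patches quoted earlier in the thread.
 */
static void example_pick_gtt_size(struct amdgpu_device *adev)
{
	u64 zone_limit =
		ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);

	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = min(adev->mc.mc_vram_size, zone_limit);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
}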




___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: status of MST ?

2016-08-02 Thread Kenneth Johansson

On 2016-08-01 18:25, Alex Deucher wrote:

On Mon, Aug 1, 2016 at 12:17 PM, Kenneth Johansson  wrote:

On 2016-08-01 17:12, Alex Deucher wrote:

On Fri, Jul 29, 2016 at 8:51 AM, Kenneth Johansson  wrote:

I have a 30" Dell MST monitor and it only uses a 30 Hz refresh rate when I
use
my R9 card, with both the open source driver and the amdgpu-pro driver.

Is there some way to make it work properly?


It's hard to say what the problem might be without more information
about your monitor and setup.  File a bug and attach your xorg log and


Sure. I was just wondering if it is supposed to work. No point filing bugs
if it's not even supported.

Yes, MST is supported with the newer DAL modesetting code that is in
the pro stack, but not yet upstream.

Alex




So I retested the 16.30.3-306809 version of the amdgpu-pro on top of 
linux 4.4.0-33-generic from ubuntu.


One thing: could you have a version string or something printed when the 
module is loaded, so it's easy to see which module has been loaded? Maybe 
even the compile time, to make it really clear. This is only needed when 
loading modules built manually. Maybe the dkms system has some way to 
add defines when compiling so that a timestamp can be added.
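
For illustration, a standalone example of the kind of load-time banner being
asked for; this is a hypothetical sample module, not amdgpu code:

#include <linux/module.h>

/*
 * Hypothetical sample module, not amdgpu code: print a build stamp at
 * load time so a manually built (e.g. dkms) module is easy to tell
 * apart from the distribution one.
 */
static int __init banner_init(void)
{
	pr_info("example: module built on %s at %s\n", __DATE__, __TIME__);
	return 0;
}
module_init(banner_init);

static void __exit banner_exit(void)
{
}
module_exit(banner_exit);

MODULE_LICENSE("GPL");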


I guess I got the "pro" version, as I saw this line:

[drm] DAL is enabled

Do you also want the "pro" bug on bugs.freedesktop.org, or does AMD have 
another bug tracker for pro stuff?


I attach dmesg to this mail as I suspect that is where the detection of 
MST should be happening. The xorg log was over 1MB in size, so I saved it 
for the bug report.






[0.00] microcode: CPU0 microcode updated early to revision 0x9e, date = 
2016-06-22
[0.00] Initializing cgroup subsys cpuset
[0.00] Initializing cgroup subsys cpu
[0.00] Initializing cgroup subsys cpuacct
[0.00] Linux version 4.4.0-33-generic (buildd@lgw01-03) (gcc version 
5.3.1 20160413 (Ubuntu 5.3.1-14ubuntu2.1) ) #52-Ubuntu SMP Fri Jul 22 19:16:44 
UTC 2016 (Ubuntu 4.4.0-33.52-generic 4.4.15)
[0.00] Command line: BOOT_IMAGE=/boot/vmlinuz-4.4.0-33-generic 
root=UUID=75bb5d92-206d-4bb8-a405-150b9b93d6d3 ro quiet splash vt.handoff=7
[0.00] KERNEL supported cpus:
[0.00]   Intel GenuineIntel
[0.00]   AMD AuthenticAMD
[0.00]   Centaur CentaurHauls
[0.00] x86/fpu: xstate_offset[2]:  576, xstate_sizes[2]:  256
[0.00] x86/fpu: xstate_offset[3]:  960, xstate_sizes[3]:   64
[0.00] x86/fpu: xstate_offset[4]: 1024, xstate_sizes[4]:   64
[0.00] x86/fpu: Supporting XSAVE feature 0x01: 'x87 floating point 
registers'
[0.00] x86/fpu: Supporting XSAVE feature 0x02: 'SSE registers'
[0.00] x86/fpu: Supporting XSAVE feature 0x04: 'AVX registers'
[0.00] x86/fpu: Supporting XSAVE feature 0x08: 'MPX bounds registers'
[0.00] x86/fpu: Supporting XSAVE feature 0x10: 'MPX CSR'
[0.00] x86/fpu: Enabled xstate features 0x1f, context size is 1088 
bytes, using 'standard' format.
[0.00] x86/fpu: Using 'eager' FPU context switches.
[0.00] e820: BIOS-provided physical RAM map:
[0.00] BIOS-e820: [mem 0x-0x0009c7ff] usable
[0.00] BIOS-e820: [mem 0x0009c800-0x0009] reserved
[0.00] BIOS-e820: [mem 0x000e-0x000f] reserved
[0.00] BIOS-e820: [mem 0x0010-0x86b51fff] usable
[0.00] BIOS-e820: [mem 0x86b52000-0x86b52fff] ACPI NVS
[0.00] BIOS-e820: [mem 0x86b53000-0x86b9cfff] reserved
[0.00] BIOS-e820: [mem 0x86b9d000-0x8e633fff] usable
[0.00] BIOS-e820: [mem 0x8e634000-0x8e96afff] reserved
[0.00] BIOS-e820: [mem 0x8e96b000-0x8eb70fff] usable
[0.00] BIOS-e820: [mem 0x8eb71000-0x8f2cafff] ACPI NVS
[0.00] BIOS-e820: [mem 0x8f2cb000-0x8fbfefff] reserved
[0.00] BIOS-e820: [mem 0x8fbff000-0x8fbf] usable
[0.00] BIOS-e820: [mem 0xe000-0xefff] reserved
[0.00] BIOS-e820: [mem 0xfe00-0xfe010fff] reserved
[0.00] BIOS-e820: [mem 0xfec0-0xfec00fff] reserved
[0.00] BIOS-e820: [mem 0xfee0-0xfee00fff] reserved
[0.00] BIOS-e820: [mem 0xff00-0x] reserved
[0.00] BIOS-e820: [mem 0x0001-0x000c6eff] usable
[0.00] NX (Execute Disable) protection: active
[0.00] SMBIOS 3.0 present.
[0.00] DMI: MSI MS-7976/Z170A GAMING M7 (MS-7976), BIOS 1.B0 02/22/2016
[0.00] e820: update [mem 0x-0x0fff] usable ==> reserved
[0.00] e820: remove [mem 0x000a-0x000f] usable
[0.00] e820: last_pfn = 0xc6f000 max_arch_pfn = 0x4
[0.00] MTRR default type: write-back
[0.00] MTRR fixed ranges enabled:
[0.00]   0-9 write-b

Re: [PATCH] Add freesync ioctl interface

2016-08-02 Thread Dave Airlie
On 2 August 2016 at 12:26, Hawking Zhang  wrote:
> Change-Id: I38cb3a80e75a904cee875ae47bc0a39a3d471aca
> Signed-off-by: Hawking Zhang 
> ---
>  include/drm/amdgpu_drm.h | 15 +++
>  1 file changed, 15 insertions(+)
>
> diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
> index 46a3c40..a4f816c 100644
> --- a/include/drm/amdgpu_drm.h
> +++ b/include/drm/amdgpu_drm.h
> @@ -48,6 +48,7 @@
>  #define DRM_AMDGPU_GEM_USERPTR 0x11
>  #define DRM_AMDGPU_WAIT_FENCES 0x12
>  #define DRM_AMDGPU_GEM_FIND_BO  0x13
> +#define DRM_AMDGPU_FREESYNC 0x14
>
>  #define DRM_IOCTL_AMDGPU_GEM_CREATEDRM_IOWR(DRM_COMMAND_BASE + 
> DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>  #define DRM_IOCTL_AMDGPU_GEM_MMAP  DRM_IOWR(DRM_COMMAND_BASE + 
> DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
> @@ -63,6 +64,7 @@
>  #define DRM_IOCTL_AMDGPU_GEM_USERPTR   DRM_IOWR(DRM_COMMAND_BASE + 
> DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
>  #define DRM_IOCTL_AMDGPU_WAIT_FENCES   DRM_IOWR(DRM_COMMAND_BASE + 
> DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>  #define DRM_IOCTL_AMDGPU_GEM_FIND_BO  DRM_IOWR(DRM_COMMAND_BASE + 
> DRM_AMDGPU_GEM_FIND_BO, struct drm_amdgpu_gem_find_bo)
> +#define DRM_IOCTL_AMDGPU_FREESYNC   DRM_IOWR(DRM_COMMAND_BASE + 
> DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
>
>  #define AMDGPU_GEM_DOMAIN_CPU  0x1
>  #define AMDGPU_GEM_DOMAIN_GTT  0x2
> @@ -706,4 +708,17 @@ struct drm_amdgpu_virtual_range {
> uint64_t start;
> uint64_t end;
>  };
> +
> +/*
> + * Definition of free sync enter and exit signals
> + * We may have more options in the future
> + */
> +#define AMDGPU_FREESYNC_FULLSCREEN_ENTER1
> +#define AMDGPU_FREESYNC_FULLSCREEN_EXIT 2
> +
> +struct drm_amdgpu_freesync {
> +__u32 op;   /* AMDGPU_FREESYNC_FULLSCREEN_ENTER 
> or */
> +/* AMDGPU_FREESYNC_FULLSCREEN_ENTER 
> */
> +__u32 spare[7];
> +};
>  #endif

Isn't freesync meant to be a generic non-driver useful thing?

This should be integrated with atomic modesetting API or just the KMS APIs.

Dave.
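
For illustration, userspace would drive the proposed ioctl roughly like this;
the helper below is hypothetical and only exercises the uapi quoted above,
assuming the header from the patch is installed:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* the header this patch extends */

/* Hypothetical helper, not part of the patch: toggle the proposed
 * freesync state on an already open amdgpu DRM fd. */
static int example_set_freesync(int fd, int enter)
{
	struct drm_amdgpu_freesync args;

	memset(&args, 0, sizeof(args));
	args.op = enter ? AMDGPU_FREESYNC_FULLSCREEN_ENTER
			: AMDGPU_FREESYNC_FULLSCREEN_EXIT;

	return ioctl(fd, DRM_IOCTL_AMDGPU_FREESYNC, &args);
}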
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 00/10] GART table recovery

2016-08-02 Thread zhoucm1



On 2016-08-02 23:15, Christian König wrote:

Well you have been hardworking during my vacation :)

Looks pretty good to me, but I hope that I can get a closer look tomorrow.

Is there any particular order the three sets must be applied?

Yes, they should be applied in my development order:
1. [PATCH 00/13] shadow page table support
2. [PATCH 00/11] add recovery entity and run queue
3. [PATCH 00/10] GART table recovery

Thanks,
David Zhou


Regards,
Christian.

On 02.08.2016 at 10:00, Chunming Zhou wrote:
The gart table is stored in one bo, which must be ready before gart init, 
but the shadow bo must be created after gart is ready, so they cannot 
be created at the same time. The shadow bo itself is also included in the 
gart table, so the shadow bo needs a synchronization after device init. 
After the sync, the contents of the bo and its shadow will be the same 
and will be updated at the same time. Then we will be able to recover 
the gart table from the shadow bo when a full gpu reset happens.


Patch 10 is a fix for a memory leak.

Chunming Zhou (10):
   drm/amdgpu: make need_backup generic
   drm/amdgpu: implement gart late_init/fini
   drm/amdgpu: add gart_late_init/fini to gmc V7/8
   drm/amdgpu: abstract amdgpu_bo_create_shadow
   drm/amdgpu: shadow gart table support
   drm/amdgpu: make recover_bo_from_shadow be generic
   drm/amdgpu: implement gart recovery
   drm/amdgpu: recover gart table first when full reset
   drm/amdgpu: sync gart table before initialization completed
   drm/amdgpu: fix memory leak of sched fence

  drivers/gpu/drm/amd/amdgpu/amdgpu.h|   9 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   2 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 139 
+

  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c|   2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  80 ++---
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |   9 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |  50 ++-
  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  |  39 +++-
  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  |  40 -
  9 files changed, 304 insertions(+), 66 deletions(-)





___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx